1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/debugfs.h>
34 #include <linux/mlx5/qp.h>
35 #include <linux/mlx5/cq.h>
36 #include <linux/mlx5/driver.h>
37 #include "mlx5_core.h"
38 #include "lib/eq.h"
39
/* Indexes of the per-QP debugfs files; must stay in sync with qp_fields[]. */
enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};
51
/* File names for the per-QP debugfs entries, indexed by the QP_* enum. */
static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};
63
/* Indexes of the per-EQ debugfs files; must stay in sync with eq_fields[]. */
enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};
69
/* File names for the per-EQ debugfs entries, indexed by the EQ_* enum. */
static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};
75
/* Indexes of the per-CQ debugfs files; must stay in sync with cq_fields[]. */
enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};
81
/* File names for the per-CQ debugfs entries, indexed by the CQ_* enum. */
static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};
87
/* "mlx5" directory at the debugfs root; created by mlx5_register_debugfs(). */
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);
90
/* Create the driver-wide "mlx5" debugfs directory. */
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}
95
/* Remove the driver-wide "mlx5" debugfs directory. */
void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}
100
/* Return the per-device debugfs root directory for @dev. */
struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
	return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
106
/* Create the "QPs" directory under the device's debugfs root. */
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
112
/* Remove the "QPs" debugfs directory and everything beneath it. */
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
118
/* Create the "EQs" directory under the device's debugfs root. */
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
123
/* Remove the "EQs" debugfs directory and everything beneath it. */
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
128
average_read(struct file * filp,char __user * buf,size_t count,loff_t * pos)129 static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
130 loff_t *pos)
131 {
132 struct mlx5_cmd_stats *stats;
133 u64 field = 0;
134 int ret;
135 char tbuf[22];
136
137 stats = filp->private_data;
138 spin_lock_irq(&stats->lock);
139 if (stats->n)
140 field = div64_u64(stats->sum, stats->n);
141 spin_unlock_irq(&stats->lock);
142 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
143 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
144 }
145
/* Write handler for a command's "average" file: any write resets the
 * accumulated statistics (sum and sample count) to zero.
 */
static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	/* Consume the whole write; the data itself is ignored. */
	*pos += count;

	return count;
}
161
/* File operations for the per-command "average" debugfs file. */
static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};
168
slots_read(struct file * filp,char __user * buf,size_t count,loff_t * pos)169 static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
170 loff_t *pos)
171 {
172 struct mlx5_cmd *cmd;
173 char tbuf[6];
174 int weight;
175 int field;
176 int ret;
177
178 cmd = filp->private_data;
179 weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
180 field = cmd->vars.max_reg_cmds - weight;
181 ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
182 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
183 }
184
/* File operations for the read-only "slots_inuse" debugfs file. */
static const struct file_operations slots_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= slots_read,
};
190
mlx5_cmdif_debugfs_init(struct mlx5_core_dev * dev)191 void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
192 {
193 struct mlx5_cmd_stats *stats;
194 struct dentry **cmd;
195 const char *namep;
196 int i;
197
198 cmd = &dev->priv.dbg.cmdif_debugfs;
199 *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
200
201 debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
202
203 for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
204 stats = &dev->cmd.stats[i];
205 namep = mlx5_command_str(i);
206 if (strcmp(namep, "unknown command opcode")) {
207 stats->root = debugfs_create_dir(namep, *cmd);
208
209 debugfs_create_file("average", 0400, stats->root, stats,
210 &stats_fops);
211 debugfs_create_u64("n", 0400, stats->root, &stats->n);
212 debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
213 debugfs_create_u64("failed_mbox_status", 0400, stats->root,
214 &stats->failed_mbox_status);
215 debugfs_create_u32("last_failed_errno", 0400, stats->root,
216 &stats->last_failed_errno);
217 debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
218 &stats->last_failed_mbox_status);
219 debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
220 &stats->last_failed_syndrome);
221 }
222 }
223 }
224
/* Remove the "commands" debugfs tree and everything beneath it. */
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}
229
/* Create the "CQs" directory under the device's debugfs root. */
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
234
/* Remove the "CQs" debugfs directory and everything beneath it. */
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}
239
/* Create the "pages" debugfs directory exposing firmware page accounting
 * counters (totals, per-function-type counts, and failure/drop counters).
 */
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
	struct dentry *pages;

	dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
	pages = dev->priv.dbg.pages_debugfs;

	debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
	debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
	debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
	debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
	debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
			   &dev->priv.reclaim_pages_discard);
}
256
/* Remove the "pages" debugfs directory and everything beneath it. */
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
261
/* Query the firmware QP context for @qp and return the field selected by
 * @index (one of the QP_* enum values).  For string-valued fields (state,
 * transport) the returned u64 carries the string's address and *is_str is
 * set to 1; for numeric fields *is_str is set to 0.  Returns 0 on
 * allocation or query failure (*is_str is left untouched in that case;
 * the caller initializes it).
 */
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
	u64 param = 0;
	u32 *out;
	int state;
	u32 *qpc;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return 0;

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
	if (err)
		goto out;

	*is_str = 0;

	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		state = MLX5_GET(qpc, qpc, state);
		param = (unsigned long)mlx5_qp_state_str(state);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
		*is_str = 1;
		break;
	case QP_MTU:
		/* Translate the IB MTU enum into a byte count. */
		switch (MLX5_GET(qpc, qpc, mtu)) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
		break;
	case QP_RECV_SZ:
		/* RQ stride is stored as log2 minus 4. */
		param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
		break;
	case QP_N_SEND:
		/* A QP with no_sq set has no send queue to report. */
		if (!MLX5_GET(qpc, qpc, no_sq))
			param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
		break;
	case QP_LOG_PG_SZ:
		/* Firmware stores page size as log2 relative to 4K. */
		param = MLX5_GET(qpc, qpc, log_page_size) + 12;
		break;
	case QP_RQPN:
		param = MLX5_GET(qpc, qpc, remote_qpn);
		break;
	}
out:
	kfree(out);
	return param;
}
341
/* Query the firmware EQ context for @eq and return the field selected by
 * @index (one of the EQ_* enum values).  Returns 0 on allocation or query
 * failure.
 */
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		/* Firmware stores page size as log2 relative to 4K. */
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}
381
/* Query the firmware CQ context for @cq and return the field selected by
 * @index (one of the CQ_* enum values).  Returns 0 on allocation or query
 * failure.
 */
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}
418
dbg_read(struct file * filp,char __user * buf,size_t count,loff_t * pos)419 static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
420 loff_t *pos)
421 {
422 struct mlx5_field_desc *desc;
423 struct mlx5_rsc_debug *d;
424 char tbuf[18];
425 int is_str = 0;
426 u64 field;
427 int ret;
428
429 desc = filp->private_data;
430 d = (void *)(desc - desc->i) - sizeof(*d);
431 switch (d->type) {
432 case MLX5_DBG_RSC_QP:
433 field = qp_read_field(d->dev, d->object, desc->i, &is_str);
434 break;
435
436 case MLX5_DBG_RSC_EQ:
437 field = eq_read_field(d->dev, d->object, desc->i);
438 break;
439
440 case MLX5_DBG_RSC_CQ:
441 field = cq_read_field(d->dev, d->object, desc->i);
442 break;
443
444 default:
445 mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
446 return -EINVAL;
447 }
448
449 if (is_str)
450 ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
451 else
452 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
453
454 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
455 }
456
/* File operations shared by all per-resource field debugfs files. */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};
462
/* Create a debugfs directory named after the resource number @rsn under
 * @root, with one read-only file per entry of @field.  Allocates and
 * returns (via *dbg) the mlx5_rsc_debug tracking structure; dbg_read()
 * relies on the fields[] array immediately following the struct header
 * to recover it from a field descriptor.  Returns 0 or -ENOMEM.
 */
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	/* Directory is named by the resource number in hex, e.g. "0x1a". */
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);

	for (i = 0; i < nfile; i++) {
		/* Each descriptor records its own index for dbg_read(). */
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}
490
/* Tear down a resource's debugfs directory and free its tracking struct. */
static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}
496
/* Expose @qp under the device's "QPs" debugfs directory.  A no-op when
 * debugfs is unavailable.  On failure qp->dbg is cleared and the error
 * (-ENOMEM) is returned.
 */
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);
513
mlx5_debug_qp_remove(struct mlx5_core_dev * dev,struct mlx5_core_qp * qp)514 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
515 {
516 if (!mlx5_debugfs_root)
517 return;
518
519 if (qp->dbg)
520 rem_res_tree(qp->dbg);
521 }
522 EXPORT_SYMBOL(mlx5_debug_qp_remove);
523
/* Expose @eq under the device's "EQs" debugfs directory.  A no-op when
 * debugfs is unavailable.  On failure eq->dbg is cleared and the error
 * (-ENOMEM) is returned.
 */
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}
539
mlx5_debug_eq_remove(struct mlx5_core_dev * dev,struct mlx5_eq * eq)540 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
541 {
542 if (!mlx5_debugfs_root)
543 return;
544
545 if (eq->dbg)
546 rem_res_tree(eq->dbg);
547 }
548
/* Expose @cq under the device's "CQs" debugfs directory.  A no-op when
 * debugfs is unavailable.  On failure cq->dbg is cleared and the error
 * (-ENOMEM) is returned.
 */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}
564
/* Remove @cq's debugfs directory, if it was created, and clear the
 * pointer so a repeated remove is a no-op.
 */
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg) {
		rem_res_tree(cq->dbg);
		cq->dbg = NULL;
	}
}
575