/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

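/*
 * Per-resource field indices. Each enum below selects one field that the
 * corresponding *_read_field() helper decodes, and indexes the matching
 * *_fields[] array that names the debugfs file exposing it.
 */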
enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID] = "pid",
	[QP_STATE] = "state",
	[QP_XPORT] = "transport",
	[QP_MTU] = "mtu",
	[QP_N_RECV] = "num_recv",
	[QP_RECV_SZ] = "rcv_wqe_sz",
	[QP_N_SEND] = "num_send",
	[QP_LOG_PG_SZ] = "log2_page_sz",
	[QP_RQPN] = "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES] = "num_eqes",
	[EQ_INTR] = "intr",
	[EQ_LOG_PG_SZ] = "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID] = "pid",
	[CQ_NUM_CQES] = "num_cqes",
	[CQ_LOG_PG_SZ] = "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

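/*
 * Module-wide debugfs root. The per-device dbg_root directories used by the
 * init helpers below are created underneath it (typically visible as
 * /sys/kernel/debug/mlx5 when debugfs is mounted in the usual place).
 */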
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.eq_debugfs);
}

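/*
 * "average" file for a command opcode: reading reports the running average
 * of the accumulated command statistics (stats->sum / stats->n, or 0 when
 * no samples have been recorded); writing anything resets both counters.
 */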
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = average_read,
	.write = average_write,
};

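/*
 * Build the "commands" tree: one directory per known command opcode
 * (unknown opcodes are skipped), each holding an "average" file backed by
 * stats_fops and an "n" sample counter exposed via debugfs_create_u64().
 */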
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int i;

	cmd = &dev->priv.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);

	for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
					    &stats_fops);
			debugfs_create_u64("n", 0400, stats->root, &stats->n);
		}
	}
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cq_debugfs);
}

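/*
 * Decode a single QP field for dbg_read(). The QP context is fetched with
 * QUERY_QP on every read. String-valued fields (state, transport) return a
 * pointer cast to u64 and set *is_str so the caller prints it as text.
 * log_page_size is reported by the device relative to a 4K base page,
 * hence the "+ 12" to get log2 of the page size in bytes.
 */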
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
	u64 param = 0;
	u32 *out;
	int state;
	u32 *qpc;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return 0;

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
	if (err)
		goto out;

	*is_str = 0;

	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		state = MLX5_GET(qpc, qpc, state);
		param = (unsigned long)mlx5_qp_state_str(state);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
		*is_str = 1;
		break;
	case QP_MTU:
		switch (MLX5_GET(qpc, qpc, mtu)) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
		break;
	case QP_RECV_SZ:
		param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
		break;
	case QP_N_SEND:
		if (!MLX5_GET(qpc, qpc, no_sq))
			param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
		break;
	case QP_LOG_PG_SZ:
		param = MLX5_GET(qpc, qpc, log_page_size) + 12;
		break;
	case QP_RQPN:
		param = MLX5_GET(qpc, qpc, remote_qpn);
		break;
	}
out:
	kfree(out);
	return param;
}

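/*
 * Decode a single EQ field for dbg_read() by issuing QUERY_EQ and reading
 * the returned EQ context; on failure a warning is logged and 0 returned.
 */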
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

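/*
 * Decode a single CQ field for dbg_read(). Unlike the QP/EQ helpers this
 * goes through mlx5_core_query_cq() rather than building the command
 * inline, and uses kvzalloc()/kvfree() for the output buffer.
 */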
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

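/*
 * Common read handler for every per-field debugfs file. The file's private
 * data points at one entry of the fields[] flexible array at the tail of
 * struct mlx5_rsc_debug, so stepping back desc->i entries and then
 * sizeof(*d) bytes recovers the containing mlx5_rsc_debug, and with it the
 * device, the resource object and its type.
 */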
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	desc = filp->private_data;
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = dbg_read,
};

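/*
 * Create the debugfs tree for one resource: a directory named after the
 * resource number in hex ("0x%x") under the given root, with one read-only
 * file per entry of field[], all served by dbg_read() through fops. The
 * mlx5_rsc_debug and its field descriptors are allocated in one go with
 * struct_size().
 */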
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

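/*
 * Public add/remove hooks, intended to be called as QPs, EQs and CQs are
 * created and destroyed. They are no-ops when the global debugfs root was
 * not created.
 */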
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root)
		return;

	if (qp->dbg)
		rem_res_tree(qp->dbg);
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg) {
		rem_res_tree(cq->dbg);
		cq->dbg = NULL;
	}
}