1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #ifdef CONFIG_DEBUG_FS
12
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "npc.h"
23
24 #define DEBUGFS_DIR_NAME "octeontx2"
25
/* Indices of per-LMAC CGX statistics counters. The same index space is
 * used for both RX and TX; the human-readable meaning of each index is
 * given by cgx_rx_stats_fields[] / cgx_tx_stats_fields[] below.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
47
48 /* NIX TX stats */
/* NIX TX stats — hardware stat register indices for a NIX LF's
 * transmit-side counters.
 */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,	/* number of TX stats */
};
57
58 /* NIX RX stats */
/* NIX RX stats — hardware stat register indices for a NIX LF's
 * receive-side counters.
 */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,	/* number of RX stats */
};
74
/* Human-readable names for CGX RX statistics, indexed by CGX_STATx */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
90
91 static char *cgx_tx_stats_fields[] = {
92 [CGX_STAT0] = "Packets dropped due to excessive collisions",
93 [CGX_STAT1] = "Packets dropped due to excessive deferral",
94 [CGX_STAT2] = "Multiple collisions before successful transmission",
95 [CGX_STAT3] = "Single collisions before successful transmission",
96 [CGX_STAT4] = "Total octets sent on the interface",
97 [CGX_STAT5] = "Total frames sent on the interface",
98 [CGX_STAT6] = "Packets sent with an octet count < 64",
99 [CGX_STAT7] = "Packets sent with an octet count == 64",
100 [CGX_STAT8] = "Packets sent with an octet count of 65–127",
101 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
102 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
103 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
104 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
105 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
106 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
107 [CGX_STAT15] = "Packets sent to the multicast DMAC",
108 [CGX_STAT16] = "Transmit underflow and were truncated",
109 [CGX_STAT17] = "Control/PAUSE packets sent",
110 };
111
/* Number of banks in an NDC block, read from its NDC_AF_CONST register */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
					    blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS macros below accept NULL for unused ops:
 * rvu_dbg_##NULL and rvu_dbg_open_##NULL expand to plain NULL.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declare a seq_file-backed debugfs file_operations named
 * rvu_dbg_<name>_fops, with an open helper that binds <read_op> as the
 * seq_file show callback and inode->i_private as its private data.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Declare a raw (non-seq_file) debugfs file_operations named
 * rvu_dbg_<name>_fops using simple_open; read/write go straight to the
 * given handlers.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
139
140 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
141
/* Render the list of @block's LFs owned by @pcifunc into @lfs as a
 * comma-separated string, compressing runs of consecutive LFs into
 * "first-last" ranges (e.g. "0-3,7,9-10").
 *
 * NOTE(review): writes into @lfs with unbounded sprintf(); callers must
 * size the buffer for the worst case — confirm against all callers.
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts at block.lf.max (an impossible LF number) so the
	 * first matching LF can never look like a run continuation.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		/* Skip LFs attached to other PF/VFs */
		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Extends the current run; printing is deferred
			 * until the run ends.
			 */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the previous run, then start a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* A run still open after the loop ends at prev_lf */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
175
/* Walk every PF/VF x block combination and return the width (including
 * NUL) of the longest LF-list string, with a floor of 12 characters;
 * used to size the columns of the rsrc_alloc dump.  Returns -ENOMEM if
 * the scratch buffer cannot be allocated.
 *
 * NOTE(review): the 256-byte scratch buffer is filled by
 * get_lf_str_list() via unbounded sprintf(); assumes 256 covers the
 * worst-case LF list — confirm.
 */
static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* vf == 0 denotes the PF itself, hence the inclusive bound */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				/* Skip blocks not present on this silicon */
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}
208
209 /* Dumps current provisioning status of all RVU block LFs */
rvu_dbg_rsrc_attach_status(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)210 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
211 char __user *buffer,
212 size_t count, loff_t *ppos)
213 {
214 int index, off = 0, flag = 0, len = 0, i = 0;
215 struct rvu *rvu = filp->private_data;
216 int bytes_not_copied = 0;
217 struct rvu_block block;
218 int pf, vf, pcifunc;
219 int buf_size = 2048;
220 int lf_str_size;
221 char *lfs;
222 char *buf;
223
224 /* don't allow partial reads */
225 if (*ppos != 0)
226 return 0;
227
228 buf = kzalloc(buf_size, GFP_KERNEL);
229 if (!buf)
230 return -ENOSPC;
231
232 /* Get the maximum width of a column */
233 lf_str_size = get_max_column_width(rvu);
234
235 lfs = kzalloc(lf_str_size, GFP_KERNEL);
236 if (!lfs) {
237 kfree(buf);
238 return -ENOMEM;
239 }
240 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
241 "pcifunc");
242 for (index = 0; index < BLK_COUNT; index++)
243 if (strlen(rvu->hw->block[index].name)) {
244 off += scnprintf(&buf[off], buf_size - 1 - off,
245 "%-*s", lf_str_size,
246 rvu->hw->block[index].name);
247 }
248
249 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
250 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
251 if (bytes_not_copied)
252 goto out;
253
254 i++;
255 *ppos += off;
256 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
257 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
258 off = 0;
259 flag = 0;
260 pcifunc = pf << 10 | vf;
261 if (!pcifunc)
262 continue;
263
264 if (vf) {
265 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
266 off = scnprintf(&buf[off],
267 buf_size - 1 - off,
268 "%-*s", lf_str_size, lfs);
269 } else {
270 sprintf(lfs, "PF%d", pf);
271 off = scnprintf(&buf[off],
272 buf_size - 1 - off,
273 "%-*s", lf_str_size, lfs);
274 }
275
276 for (index = 0; index < BLK_COUNT; index++) {
277 block = rvu->hw->block[index];
278 if (!strlen(block.name))
279 continue;
280 len = 0;
281 lfs[len] = '\0';
282 get_lf_str_list(block, pcifunc, lfs);
283 if (strlen(lfs))
284 flag = 1;
285
286 off += scnprintf(&buf[off], buf_size - 1 - off,
287 "%-*s", lf_str_size, lfs);
288 }
289 if (flag) {
290 off += scnprintf(&buf[off],
291 buf_size - 1 - off, "\n");
292 bytes_not_copied = copy_to_user(buffer +
293 (i * off),
294 buf, off);
295 if (bytes_not_copied)
296 goto out;
297
298 i++;
299 *ppos += off;
300 }
301 }
302 }
303
304 out:
305 kfree(lfs);
306 kfree(buf);
307 if (bytes_not_copied)
308 return -EFAULT;
309
310 return *ppos;
311 }
312
313 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
314
rvu_dbg_is_valid_lf(struct rvu * rvu,int blktype,int lf,u16 * pcifunc)315 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
316 u16 *pcifunc)
317 {
318 struct rvu_block *block;
319 struct rvu_hwinfo *hw;
320 int blkaddr;
321
322 blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
323 if (blkaddr < 0) {
324 dev_warn(rvu->dev, "Invalid blktype\n");
325 return false;
326 }
327
328 hw = rvu->hw;
329 block = &hw->block[blkaddr];
330
331 if (lf < 0 || lf >= block->lf.max) {
332 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
333 block->lf.max - 1);
334 return false;
335 }
336
337 *pcifunc = block->fn_map[lf];
338 if (!*pcifunc) {
339 dev_warn(rvu->dev,
340 "This LF is not attached to any RVU PFFUNC\n");
341 return false;
342 }
343 return true;
344 }
345
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)346 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
347 {
348 char *buf;
349
350 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
351 if (!buf)
352 return;
353
354 if (!pfvf->aura_ctx) {
355 seq_puts(m, "Aura context is not initialized\n");
356 } else {
357 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
358 pfvf->aura_ctx->qsize);
359 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
360 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
361 }
362
363 if (!pfvf->pool_ctx) {
364 seq_puts(m, "Pool context is not initialized\n");
365 } else {
366 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
367 pfvf->pool_ctx->qsize);
368 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
369 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
370 }
371 kfree(buf);
372 }
373
374 /* The 'qsize' entry dumps current Aura/Pool context Qsize
375 * and each context's current enable/disable status in a bitmap.
376 */
rvu_dbg_qsize_display(struct seq_file * filp,void * unsused,int blktype)377 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
378 int blktype)
379 {
380 void (*print_qsize)(struct seq_file *filp,
381 struct rvu_pfvf *pfvf) = NULL;
382 struct rvu_pfvf *pfvf;
383 struct rvu *rvu;
384 int qsize_id;
385 u16 pcifunc;
386
387 rvu = filp->private;
388 switch (blktype) {
389 case BLKTYPE_NPA:
390 qsize_id = rvu->rvu_dbg.npa_qsize_id;
391 print_qsize = print_npa_qsize;
392 break;
393
394 case BLKTYPE_NIX:
395 qsize_id = rvu->rvu_dbg.nix_qsize_id;
396 print_qsize = print_nix_qsize;
397 break;
398
399 default:
400 return -EINVAL;
401 }
402
403 if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
404 return -EINVAL;
405
406 pfvf = rvu_get_pfvf(rvu, pcifunc);
407 print_qsize(filp, pfvf);
408
409 return 0;
410 }
411
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)412 static ssize_t rvu_dbg_qsize_write(struct file *filp,
413 const char __user *buffer, size_t count,
414 loff_t *ppos, int blktype)
415 {
416 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
417 struct seq_file *seqfile = filp->private_data;
418 char *cmd_buf, *cmd_buf_tmp, *subtoken;
419 struct rvu *rvu = seqfile->private;
420 u16 pcifunc;
421 int ret, lf;
422
423 cmd_buf = memdup_user(buffer, count + 1);
424 if (IS_ERR(cmd_buf))
425 return -ENOMEM;
426
427 cmd_buf[count] = '\0';
428
429 cmd_buf_tmp = strchr(cmd_buf, '\n');
430 if (cmd_buf_tmp) {
431 *cmd_buf_tmp = '\0';
432 count = cmd_buf_tmp - cmd_buf + 1;
433 }
434
435 cmd_buf_tmp = cmd_buf;
436 subtoken = strsep(&cmd_buf, " ");
437 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
438 if (cmd_buf)
439 ret = -EINVAL;
440
441 if (!strncmp(subtoken, "help", 4) || ret < 0) {
442 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
443 goto qsize_write_done;
444 }
445
446 if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
447 ret = -EINVAL;
448 goto qsize_write_done;
449 }
450 if (blktype == BLKTYPE_NPA)
451 rvu->rvu_dbg.npa_qsize_id = lf;
452 else
453 rvu->rvu_dbg.nix_qsize_id = lf;
454
455 qsize_write_done:
456 kfree(cmd_buf_tmp);
457 return ret ? ret : count;
458 }
459
rvu_dbg_npa_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)460 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
461 const char __user *buffer,
462 size_t count, loff_t *ppos)
463 {
464 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
465 BLKTYPE_NPA);
466 }
467
rvu_dbg_npa_qsize_display(struct seq_file * filp,void * unused)468 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
469 {
470 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
471 }
472
473 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
474
475 /* Dumps given NPA Aura's context */
/* Dumps given NPA Aura's context: one "Wn:" labelled line per field,
 * in hardware context-word order (W0..W6).
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
516
517 /* Dumps given NPA Pool's context */
/* Dumps given NPA Pool's context: one "Wn:" labelled line per field,
 * in hardware context-word order (W0..W8).
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
}
558
559 /* Reads aura/pool's ctx from admin queue */
/* Reads aura/pool's ctx from admin queue and dumps it.
 *
 * The LF, context id and the "dump all" flag were recorded earlier by
 * the matching *_ctx_write handler (see write_npa_ctx()).  When 'all'
 * is set, every context from 0 to qsize-1 is dumped; otherwise only the
 * selected id.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection stored by the corresponding write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the chosen context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* 'all' dumps [0, qsize); otherwise the loop runs exactly once */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
639
/* Validate and record the aura/pool context selection made through the
 * debugfs write handler, for the matching display handler to use.
 * Returns 0 on success, -EINVAL on a bad LF, id or context type.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	/* An unknown ctype leaves max_id at 0 and is rejected here, as
	 * is any out-of-range id.
	 */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	if (ctype == NPA_AQ_CTYPE_AURA) {
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
	} else {
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
	}
	return 0;
}
690
/* Parse "<npalf> [<id>|all]" from the user @buffer into @npalf, @id
 * and @all.  @cmd_buf must hold at least *count + 1 bytes.  On a
 * newline-terminated command, *count is trimmed to the consumed length.
 * Returns 0 on success, -EFAULT on copy failure, or a negative errno
 * for malformed input (including trailing tokens).
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* First token: the NPA LF number (mandatory) */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* Second token: a context id, or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* Anything left over is a malformed command */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
728
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)729 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
730 const char __user *buffer,
731 size_t count, loff_t *ppos, int ctype)
732 {
733 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
734 "aura" : "pool";
735 struct seq_file *seqfp = filp->private_data;
736 struct rvu *rvu = seqfp->private;
737 int npalf, id = 0, ret;
738 bool all = false;
739
740 if ((*ppos != 0) || !count)
741 return -EINVAL;
742
743 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
744 if (!cmd_buf)
745 return count;
746 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
747 &npalf, &id, &all);
748 if (ret < 0) {
749 dev_info(rvu->dev,
750 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
751 ctype_string, ctype_string);
752 goto done;
753 } else {
754 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
755 }
756 done:
757 kfree(cmd_buf);
758 return ret ? ret : count;
759 }
760
rvu_dbg_npa_aura_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)761 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
762 const char __user *buffer,
763 size_t count, loff_t *ppos)
764 {
765 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
766 NPA_AQ_CTYPE_AURA);
767 }
768
rvu_dbg_npa_aura_ctx_display(struct seq_file * filp,void * unused)769 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
770 {
771 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
772 }
773
774 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
775
rvu_dbg_npa_pool_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos)776 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
777 const char __user *buffer,
778 size_t count, loff_t *ppos)
779 {
780 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
781 NPA_AQ_CTYPE_POOL);
782 }
783
rvu_dbg_npa_pool_ctx_display(struct seq_file * filp,void * unused)784 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
785 {
786 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
787 }
788
789 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
790
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)791 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
792 int ctype, int transaction)
793 {
794 u64 req, out_req, lat, cant_alloc;
795 struct rvu *rvu = s->private;
796 int port;
797
798 for (port = 0; port < NDC_MAX_PORT; port++) {
799 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
800 (port, ctype, transaction));
801 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
802 (port, ctype, transaction));
803 out_req = rvu_read64(rvu, blk_addr,
804 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
805 (port, ctype, transaction));
806 cant_alloc = rvu_read64(rvu, blk_addr,
807 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
808 (port, transaction));
809 seq_printf(s, "\nPort:%d\n", port);
810 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
811 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
812 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
813 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
814 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
815 }
816 }
817
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)818 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
819 {
820 seq_puts(s, "\n***** CACHE mode read stats *****\n");
821 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
822 seq_puts(s, "\n***** CACHE mode write stats *****\n");
823 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
824 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
825 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
826 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
827 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
828 return 0;
829 }
830
rvu_dbg_npa_ndc_cache_display(struct seq_file * filp,void * unused)831 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
832 {
833 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
834 }
835
836 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
837
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)838 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
839 {
840 struct rvu *rvu = s->private;
841 int bank, max_bank;
842
843 max_bank = NDC_MAX_BANK(rvu, blk_addr);
844 for (bank = 0; bank < max_bank; bank++) {
845 seq_printf(s, "BANK:%d\n", bank);
846 seq_printf(s, "\tHits:\t%lld\n",
847 (u64)rvu_read64(rvu, blk_addr,
848 NDC_AF_BANKX_HIT_PC(bank)));
849 seq_printf(s, "\tMiss:\t%lld\n",
850 (u64)rvu_read64(rvu, blk_addr,
851 NDC_AF_BANKX_MISS_PC(bank)));
852 }
853 return 0;
854 }
855
/* debugfs show handler for NIX0 RX NDC cache statistics */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NIX0_RX,
				   BLKADDR_NDC_NIX0_RX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);

/* debugfs show handler for NIX0 TX NDC cache statistics */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NIX0_TX,
				   BLKADDR_NDC_NIX0_TX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);

/* debugfs show handler for NPA NDC hit/miss statistics */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);

/* NOTE(review): the two NIX hits/miss wrappers below pass NPA0_U as the
 * idx argument while addressing NIX NDC blocks; harmless today because
 * ndc_blk_hits_miss_stats() never reads idx, but confirm whether
 * NIX0_RX/NIX0_TX were intended.
 */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	return ndc_blk_hits_miss_stats(filp,
				       NPA0_U, BLKADDR_NDC_NIX0_RX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);

static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	return ndc_blk_hits_miss_stats(filp,
				       NPA0_U, BLKADDR_NDC_NIX0_TX);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
897
898 /* Dumps given nix_sq's context */
/* Dumps given nix_sq's context: one "Wn:" labelled line per field, in
 * hardware context-word order (W0..W15).
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8 are SQB pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10..W15 are packet/octet counters */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
961
/* Dumps given nix_rq's context.
 *
 * Pretty-prints every field of the NIX receive-queue hardware context
 * from an AQ READ response, grouped by the 64-bit context word (W0-W10)
 * each field lives in.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10 are 64-bit statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1023
/* Dumps given nix_cq's context.
 *
 * Pretty-prints every field of the NIX completion-queue hardware context
 * from an AQ READ response, grouped by context word (W0-W3).
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1054
/* Common seq_file show handler for the NIX sq_ctx/rq_ctx/cq_ctx files.
 *
 * Reads back the queue selection (NIX LF + queue id or "all") previously
 * stored by the matching write handler, fetches each selected context
 * from hardware via an AQ READ mailbox operation and pretty-prints it.
 * Returns 0 on success, -EINVAL on a bad ctype/LF/uninitialized context
 * or a failed AQ read.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct rvu *rvu = filp->private;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Pick up the selection stored by the write handler for this ctype */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Bind the per-ctype printer and the LF's queue count */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps queues [0, qsize); otherwise just the single id */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1142
/* Validate a parsed "<nixlf> <id>|all" request against the given NIX LF
 * and, when valid, store the selection in rvu->rvu_dbg so the matching
 * display handler knows what to dump.
 * Returns 0 on success, -EINVAL on a bad LF, uninitialized context,
 * out-of-range queue id or unknown ctype.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	/* Queue id must fall within the LF's configured queue count */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1203
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1204 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1205 const char __user *buffer,
1206 size_t count, loff_t *ppos,
1207 int ctype)
1208 {
1209 struct seq_file *m = filp->private_data;
1210 struct rvu *rvu = m->private;
1211 char *cmd_buf, *ctype_string;
1212 int nixlf, id = 0, ret;
1213 bool all = false;
1214
1215 if ((*ppos != 0) || !count)
1216 return -EINVAL;
1217
1218 switch (ctype) {
1219 case NIX_AQ_CTYPE_SQ:
1220 ctype_string = "sq";
1221 break;
1222 case NIX_AQ_CTYPE_RQ:
1223 ctype_string = "rq";
1224 break;
1225 case NIX_AQ_CTYPE_CQ:
1226 ctype_string = "cq";
1227 break;
1228 default:
1229 return -EINVAL;
1230 }
1231
1232 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1233
1234 if (!cmd_buf)
1235 return count;
1236
1237 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1238 &nixlf, &id, &all);
1239 if (ret < 0) {
1240 dev_info(rvu->dev,
1241 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1242 ctype_string, ctype_string);
1243 goto done;
1244 } else {
1245 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1246 ctype_string);
1247 }
1248 done:
1249 kfree(cmd_buf);
1250 return ret ? ret : count;
1251 }
1252
/* debugfs write: "echo <nixlf> [<qid>|all] > sq_ctx" selects the SQ
 * context(s) that the next read of the file will dump.
 */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}
1260
/* debugfs read: dump the previously selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
1265
1266 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1267
/* debugfs write: "echo <nixlf> [<qid>|all] > rq_ctx" selects the RQ
 * context(s) that the next read of the file will dump.
 */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}
1275
/* debugfs read: dump the previously selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}
1280
1281 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1282
/* debugfs write: "echo <nixlf> [<qid>|all] > cq_ctx" selects the CQ
 * context(s) that the next read of the file will dump.
 */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}
1290
/* debugfs read: dump the previously selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
1295
1296 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1297
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)1298 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1299 unsigned long *bmap, char *qtype)
1300 {
1301 char *buf;
1302
1303 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1304 if (!buf)
1305 return;
1306
1307 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1308 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1309 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1310 qtype, buf);
1311 kfree(buf);
1312 }
1313
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)1314 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1315 {
1316 if (!pfvf->cq_ctx)
1317 seq_puts(filp, "cq context is not initialized\n");
1318 else
1319 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1320 "cq");
1321
1322 if (!pfvf->rq_ctx)
1323 seq_puts(filp, "rq context is not initialized\n");
1324 else
1325 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1326 "rq");
1327
1328 if (!pfvf->sq_ctx)
1329 seq_puts(filp, "sq context is not initialized\n");
1330 else
1331 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1332 "sq");
1333 }
1334
/* debugfs write handler for the NIX "qsize" file: selects the NIX LF
 * whose queue sizes the next read will report.
 */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}
1342
/* debugfs read handler for the NIX "qsize" file */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
1347
1348 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1349
rvu_dbg_nix_init(struct rvu * rvu)1350 static void rvu_dbg_nix_init(struct rvu *rvu)
1351 {
1352 const struct device *dev = &rvu->pdev->dev;
1353 struct dentry *pfile;
1354
1355 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1356 if (!rvu->rvu_dbg.nix) {
1357 dev_err(rvu->dev, "create debugfs dir failed for nix\n");
1358 return;
1359 }
1360
1361 pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1362 &rvu_dbg_nix_sq_ctx_fops);
1363 if (!pfile)
1364 goto create_failed;
1365
1366 pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1367 &rvu_dbg_nix_rq_ctx_fops);
1368 if (!pfile)
1369 goto create_failed;
1370
1371 pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1372 &rvu_dbg_nix_cq_ctx_fops);
1373 if (!pfile)
1374 goto create_failed;
1375
1376 pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
1377 &rvu_dbg_nix_ndc_tx_cache_fops);
1378 if (!pfile)
1379 goto create_failed;
1380
1381 pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
1382 &rvu_dbg_nix_ndc_rx_cache_fops);
1383 if (!pfile)
1384 goto create_failed;
1385
1386 pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
1387 rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1388 if (!pfile)
1389 goto create_failed;
1390
1391 pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
1392 rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1393 if (!pfile)
1394 goto create_failed;
1395
1396 pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1397 &rvu_dbg_nix_qsize_fops);
1398 if (!pfile)
1399 goto create_failed;
1400
1401 return;
1402 create_failed:
1403 dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
1404 debugfs_remove_recursive(rvu->rvu_dbg.nix);
1405 }
1406
rvu_dbg_npa_init(struct rvu * rvu)1407 static void rvu_dbg_npa_init(struct rvu *rvu)
1408 {
1409 const struct device *dev = &rvu->pdev->dev;
1410 struct dentry *pfile;
1411
1412 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1413 if (!rvu->rvu_dbg.npa)
1414 return;
1415
1416 pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1417 &rvu_dbg_npa_qsize_fops);
1418 if (!pfile)
1419 goto create_failed;
1420
1421 pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1422 &rvu_dbg_npa_aura_ctx_fops);
1423 if (!pfile)
1424 goto create_failed;
1425
1426 pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1427 &rvu_dbg_npa_pool_ctx_fops);
1428 if (!pfile)
1429 goto create_failed;
1430
1431 pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1432 &rvu_dbg_npa_ndc_cache_fops);
1433 if (!pfile)
1434 goto create_failed;
1435
1436 pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
1437 rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
1438 if (!pfile)
1439 goto create_failed;
1440
1441 return;
1442
1443 create_failed:
1444 dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
1445 debugfs_remove_recursive(rvu->rvu_dbg.npa);
1446 }
1447
/* Read one cumulative NIX RX stat for the current CGX LMAC, print it on
 * success and evaluate to the counter value.  Sets the caller's "err";
 * NOTE(review): "cnt" is left uninitialized when the read fails, so the
 * result must not be used unless err == 0 — callers check err first.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
({ \
	u64 cnt; \
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
				     NIX_STATS_RX, &(cnt)); \
	if (!err) \
		seq_printf(s, "%s: %llu\n", name, cnt); \
	cnt; \
})

/* Same as PRINT_CGX_CUML_NIXRX_STATUS but for NIX TX stats. */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
({ \
	u64 cnt; \
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
				     NIX_STATS_TX, &(cnt)); \
	if (!err) \
		seq_printf(s, "%s: %llu\n", name, cnt); \
	cnt; \
})
1467
/* Print link status plus NIX-level and CGX-level RX/TX statistics for
 * one CGX LMAC into the seq_file.  Returns 0 on success, -ENODEV if the
 * AF device cannot be found, or the first error from a stats read.
 */
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
	struct cgx_link_user_info linfo;
	void *cgxd = s->private;
	u64 ucast, mcast, bcast;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	struct rvu *rvu;

	/* NOTE(review): pci_get_device() takes a reference on the PCI
	 * device that is never dropped with pci_dev_put() here —
	 * presumably acceptable for the AF device's lifetime; confirm.
	 */
	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	/* Link status */
	seq_puts(s, "\n=======Link Status======\n\n");
	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
	if (err)
		seq_puts(s, "Failed to read link status\n");
	seq_printf(s, "\nLink is %s %d Mbps\n\n",
		   linfo.link_up ? "UP" : "DOWN", linfo.speed);

	/* Rx stats */
	seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
	if (err)
		return err;

	/* Tx stats */
	seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
	if (err)
		return err;
	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
	if (err)
		return err;
	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
	if (err)
		return err;
	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
	if (err)
		return err;
	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
	if (err)
		return err;

	/* Rx stats */
	seq_puts(s, "\n=======CGX RX_STATS======\n\n");
	while (stat < CGX_RX_STATS_COUNT) {
		err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
		if (err)
			return err;
		seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
		stat++;
	}

	/* Tx stats */
	stat = 0;
	seq_puts(s, "\n=======CGX TX_STATS======\n\n");
	while (stat < CGX_TX_STATS_COUNT) {
		err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
		if (err)
			return err;
		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
		stat++;
	}

	return err;
}
1554
rvu_dbg_cgx_stat_display(struct seq_file * filp,void * unused)1555 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1556 {
1557 struct dentry *current_dir;
1558 int err, lmac_id;
1559 char *buf;
1560
1561 current_dir = filp->file->f_path.dentry->d_parent;
1562 buf = strrchr(current_dir->d_name.name, 'c');
1563 if (!buf)
1564 return -EINVAL;
1565
1566 err = kstrtoint(buf + 1, 10, &lmac_id);
1567 if (!err) {
1568 err = cgx_print_stats(filp, lmac_id);
1569 if (err)
1570 return err;
1571 }
1572 return err;
1573 }
1574
1575 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1576
/* Create the "cgx" debugfs tree: one "cgx<N>" directory per CGX block,
 * one "lmac<N>" directory per LMAC, each holding a "stats" file whose
 * private data is the CGX block handle.  On failure the whole cgx
 * subtree is removed.
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	const struct device *dev = &rvu->pdev->dev;
	struct dentry *pfile;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		/* cgx debugfs dir */
		sprintf(dname, "cgx%d", i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);
		for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			/* "stats" gets the cgx handle, not rvu, as private
			 * data; the LMAC id comes from the dir name.
			 */
			pfile = debugfs_create_file("stats", 0600,
						    rvu->rvu_dbg.lmac, cgx,
						    &rvu_dbg_cgx_stat_fops);
			if (!pfile)
				goto create_failed;
		}
	}
	return;

create_failed:
	dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
	debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
1614
/* NPC debugfs APIs */

/* Print the MCAM entry/counter allocation summary for one PF or VF
 * (identified by @pcifunc).  PF0 and functions with no allocations are
 * skipped silently.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	/* Skip PF0 */
	if (!pcifunc)
		return;
	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	if (!entry_acnt && !cntr_acnt)
		return;

	/* A zero function part of pcifunc means a PF, otherwise VF<n-1> */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
1650
/* seq_file show handler for "mcam_info": prints MCAM key widths, total
 * entry/counter capacity and, when any entries are in use, a per-PF/VF
 * allocation breakdown (under the mcam lock).
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
	cfg = (cfg >> 48) & 0xFFFF;
	seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
	seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* Nothing allocated: skip the per-function breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}
1715
1716 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
1717
/* seq_file show handler for "rx_miss_act_stats": prints the match
 * counter attached to the MCAM RX miss action.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
1738
1739 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
1740
/* Create the "npc" debugfs directory with its read-only mcam_info and
 * rx_miss_act_stats files.  On failure the npc subtree is removed.
 */
static void rvu_dbg_npc_init(struct rvu *rvu)
{
	const struct device *dev = &rvu->pdev->dev;
	struct dentry *pfile;

	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
	if (!rvu->rvu_dbg.npc)
		return;

	pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
				    rvu, &rvu_dbg_npc_mcam_info_fops);
	if (!pfile)
		goto create_failed;

	pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
				    rvu, &rvu_dbg_npc_rx_miss_act_fops);
	if (!pfile)
		goto create_failed;

	return;

create_failed:
	dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
	debugfs_remove_recursive(rvu->rvu_dbg.npc);
}
1766
/* Top-level debugfs entry point for the AF driver: creates the root
 * "octeontx2" directory, the global rsrc_alloc file and the per-block
 * (NPA/NIX/CGX/NPC) subtrees.  Failures are logged and the root is torn
 * down; debugfs is optional so no error is returned.
 */
void rvu_dbg_init(struct rvu *rvu)
{
	struct device *dev = &rvu->pdev->dev;
	struct dentry *pfile;

	rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
	if (!rvu->rvu_dbg.root) {
		dev_err(rvu->dev, "%s failed\n", __func__);
		return;
	}
	pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
				    &rvu_dbg_rsrc_status_fops);
	if (!pfile)
		goto create_failed;

	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);

	return;

create_failed:
	dev_err(dev, "Failed to create debugfs dir\n");
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
1793
/* Tear down the whole AF debugfs tree created by rvu_dbg_init() */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
1798
1799 #endif /* CONFIG_DEBUG_FS */
1800