• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell.
5  *
6  */
7 
8 #ifdef CONFIG_DEBUG_FS
9 
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21 
22 #define DEBUGFS_DIR_NAME "octeontx2"
23 
/* Generic CGX statistics register indices. The meaning of each index
 * depends on direction; see cgx_rx_stats_fields[] and
 * cgx_tx_stats_fields[] below for the per-direction descriptions.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
45 
/* NIX TX stats: per-LF transmit statistic indices */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,	/* unicast packets sent */
	TX_BCAST	= 0x1,	/* broadcast packets sent */
	TX_MCAST	= 0x2,	/* multicast packets sent */
	TX_DROP		= 0x3,	/* packets dropped on transmit */
	TX_OCTS		= 0x4,	/* octets sent */
	TX_STATS_ENUM_LAST,	/* number of TX stats, keep last */
};
55 
/* NIX RX stats: per-LF receive statistic indices */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,	/* octets received */
	RX_UCAST	= 0x1,	/* unicast packets received */
	RX_BCAST	= 0x2,	/* broadcast packets received */
	RX_MCAST	= 0x3,	/* multicast packets received */
	RX_DROP		= 0x4,	/* packets dropped */
	RX_DROP_OCTS	= 0x5,	/* octets of dropped packets */
	RX_FCS		= 0x6,	/* packets with FCS errors */
	RX_ERR		= 0x7,	/* packets with errors */
	RX_DRP_BCAST	= 0x8,	/* dropped broadcast packets */
	RX_DRP_MCAST	= 0x9,	/* dropped multicast packets */
	RX_DRP_L3BCAST	= 0xa,	/* dropped L3 broadcast packets */
	RX_DRP_L3MCAST	= 0xb,	/* dropped L3 multicast packets */
	RX_STATS_ENUM_LAST,	/* number of RX stats, keep last */
};
72 
/* Human-readable labels for CGX RX statistics, indexed by CGX_STATx */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88 
89 static char *cgx_tx_stats_fields[] = {
90 	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
91 	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
92 	[CGX_STAT2]	= "Multiple collisions before successful transmission",
93 	[CGX_STAT3]	= "Single collisions before successful transmission",
94 	[CGX_STAT4]	= "Total octets sent on the interface",
95 	[CGX_STAT5]	= "Total frames sent on the interface",
96 	[CGX_STAT6]	= "Packets sent with an octet count < 64",
97 	[CGX_STAT7]	= "Packets sent with an octet count == 64",
98 	[CGX_STAT8]	= "Packets sent with an octet count of 65–127",
99 	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
100 	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
101 	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
102 	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
103 	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
104 	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
105 	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
106 	[CGX_STAT16]	= "Transmit underflow and were truncated",
107 	[CGX_STAT17]	= "Control/PAUSE packets sent",
108 };
109 
/* Human-readable labels for RPM RX statistics, in hardware register
 * order. Fixes: the garbled "a1nrange" label (the underlying MIB
 * counter is aInRangeLengthErrors) and a non-ASCII en-dash in the
 * 65-127 octet-count bucket.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets with out error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with in-range length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
155 
/* Human-readable labels for RPM TX statistics, in hardware register
 * order. Fixes a non-ASCII en-dash in the 65-127 octet-count bucket so
 * all buckets use the same plain-hyphen form.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
192 
/* CPT engine type identifiers. NOTE(review): expansions presumed to be
 * AE = asymmetric, SE = symmetric, IE = inline IPsec engines, per usual
 * CPT naming -- confirm against the CPT hardware manual.
 */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,
	CPT_SE_TYPE = 2,
	CPT_IE_TYPE = 3,
};
198 
/* Placeholders so the FOPS macros below can be instantiated with no
 * read and/or write handler: "rvu_dbg_" ## "NULL" expands to these.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Generate seq_file-based file_operations for a debugfs entry: an open
 * handler wrapping single_open() plus the fops table itself. @read_op
 * is the seq_file show callback; @write_op (or NULL) handles writes.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Generate raw (non-seq_file) file_operations for a debugfs entry */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
223 
224 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
225 
/* Build a human-readable list of the LFs in @block that are attached to
 * @pcifunc, compressing consecutive runs into "a-b" ranges (e.g.
 * "0-3,7,9-10"), and write it NUL-terminated into @lfs.
 *
 * NOTE(review): writes via unbounded sprintf(); callers must size @lfs
 * for the worst case (see get_max_column_width()) -- confirm.
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Extends the current consecutive run; defer
			 * printing until the run ends.
			 */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Previous run just ended: close it, then start
			 * a new entry with this LF.
			 */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			/* Standalone LF; comma-separate after the first */
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* Close a run that reached the last attached LF */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
259 
get_max_column_width(struct rvu * rvu)260 static int get_max_column_width(struct rvu *rvu)
261 {
262 	int index, pf, vf, lf_str_size = 12, buf_size = 256;
263 	struct rvu_block block;
264 	u16 pcifunc;
265 	char *buf;
266 
267 	buf = kzalloc(buf_size, GFP_KERNEL);
268 	if (!buf)
269 		return -ENOMEM;
270 
271 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
272 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
273 			pcifunc = pf << 10 | vf;
274 			if (!pcifunc)
275 				continue;
276 
277 			for (index = 0; index < BLK_COUNT; index++) {
278 				block = rvu->hw->block[index];
279 				if (!strlen(block.name))
280 					continue;
281 
282 				get_lf_str_list(block, pcifunc, buf);
283 				if (lf_str_size <= strlen(buf))
284 					lf_str_size = strlen(buf) + 1;
285 			}
286 		}
287 	}
288 
289 	kfree(buf);
290 	return lf_str_size;
291 }
292 
293 /* Dumps current provisioning status of all RVU block LFs */
rvu_dbg_rsrc_attach_status(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)294 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
295 					  char __user *buffer,
296 					  size_t count, loff_t *ppos)
297 {
298 	int index, off = 0, flag = 0, len = 0, i = 0;
299 	struct rvu *rvu = filp->private_data;
300 	int bytes_not_copied = 0;
301 	struct rvu_block block;
302 	int pf, vf, pcifunc;
303 	int buf_size = 2048;
304 	int lf_str_size;
305 	char *lfs;
306 	char *buf;
307 
308 	/* don't allow partial reads */
309 	if (*ppos != 0)
310 		return 0;
311 
312 	buf = kzalloc(buf_size, GFP_KERNEL);
313 	if (!buf)
314 		return -ENOSPC;
315 
316 	/* Get the maximum width of a column */
317 	lf_str_size = get_max_column_width(rvu);
318 
319 	lfs = kzalloc(lf_str_size, GFP_KERNEL);
320 	if (!lfs) {
321 		kfree(buf);
322 		return -ENOMEM;
323 	}
324 	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
325 			  "pcifunc");
326 	for (index = 0; index < BLK_COUNT; index++)
327 		if (strlen(rvu->hw->block[index].name)) {
328 			off += scnprintf(&buf[off], buf_size - 1 - off,
329 					 "%-*s", lf_str_size,
330 					 rvu->hw->block[index].name);
331 		}
332 
333 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
334 	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
335 	if (bytes_not_copied)
336 		goto out;
337 
338 	i++;
339 	*ppos += off;
340 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
341 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
342 			off = 0;
343 			flag = 0;
344 			pcifunc = pf << 10 | vf;
345 			if (!pcifunc)
346 				continue;
347 
348 			if (vf) {
349 				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
350 				off = scnprintf(&buf[off],
351 						buf_size - 1 - off,
352 						"%-*s", lf_str_size, lfs);
353 			} else {
354 				sprintf(lfs, "PF%d", pf);
355 				off = scnprintf(&buf[off],
356 						buf_size - 1 - off,
357 						"%-*s", lf_str_size, lfs);
358 			}
359 
360 			for (index = 0; index < BLK_COUNT; index++) {
361 				block = rvu->hw->block[index];
362 				if (!strlen(block.name))
363 					continue;
364 				len = 0;
365 				lfs[len] = '\0';
366 				get_lf_str_list(block, pcifunc, lfs);
367 				if (strlen(lfs))
368 					flag = 1;
369 
370 				off += scnprintf(&buf[off], buf_size - 1 - off,
371 						 "%-*s", lf_str_size, lfs);
372 			}
373 			if (flag) {
374 				off +=	scnprintf(&buf[off],
375 						  buf_size - 1 - off, "\n");
376 				bytes_not_copied = copy_to_user(buffer +
377 								(i * off),
378 								buf, off);
379 				if (bytes_not_copied)
380 					goto out;
381 
382 				i++;
383 				*ppos += off;
384 			}
385 		}
386 	}
387 
388 out:
389 	kfree(lfs);
390 	kfree(buf);
391 	if (bytes_not_copied)
392 		return -EFAULT;
393 
394 	return *ppos;
395 }
396 
397 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
398 
rvu_dbg_rvu_pf_cgx_map_display(struct seq_file * filp,void * unused)399 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
400 {
401 	struct rvu *rvu = filp->private;
402 	struct pci_dev *pdev = NULL;
403 	struct mac_ops *mac_ops;
404 	char cgx[10], lmac[10];
405 	struct rvu_pfvf *pfvf;
406 	int pf, domain, blkid;
407 	u8 cgx_id, lmac_id;
408 	u16 pcifunc;
409 
410 	domain = 2;
411 	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
412 	/* There can be no CGX devices at all */
413 	if (!mac_ops)
414 		return 0;
415 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
416 		   mac_ops->name);
417 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
418 		if (!is_pf_cgxmapped(rvu, pf))
419 			continue;
420 
421 		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
422 		if (!pdev)
423 			continue;
424 
425 		cgx[0] = 0;
426 		lmac[0] = 0;
427 		pcifunc = pf << 10;
428 		pfvf = rvu_get_pfvf(rvu, pcifunc);
429 
430 		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
431 			blkid = 0;
432 		else
433 			blkid = 1;
434 
435 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
436 				    &lmac_id);
437 		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
438 		sprintf(lmac, "LMAC%d", lmac_id);
439 		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
440 			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
441 
442 		pci_dev_put(pdev);
443 	}
444 	return 0;
445 }
446 
447 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
448 
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)449 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
450 				u16 *pcifunc)
451 {
452 	struct rvu_block *block;
453 	struct rvu_hwinfo *hw;
454 
455 	hw = rvu->hw;
456 	block = &hw->block[blkaddr];
457 
458 	if (lf < 0 || lf >= block->lf.max) {
459 		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
460 			 block->lf.max - 1);
461 		return false;
462 	}
463 
464 	*pcifunc = block->fn_map[lf];
465 	if (!*pcifunc) {
466 		dev_warn(rvu->dev,
467 			 "This LF is not attached to any RVU PFFUNC\n");
468 		return false;
469 	}
470 	return true;
471 }
472 
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)473 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
474 {
475 	char *buf;
476 
477 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
478 	if (!buf)
479 		return;
480 
481 	if (!pfvf->aura_ctx) {
482 		seq_puts(m, "Aura context is not initialized\n");
483 	} else {
484 		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
485 					pfvf->aura_ctx->qsize);
486 		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
487 		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
488 	}
489 
490 	if (!pfvf->pool_ctx) {
491 		seq_puts(m, "Pool context is not initialized\n");
492 	} else {
493 		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
494 					pfvf->pool_ctx->qsize);
495 		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
496 		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
497 	}
498 	kfree(buf);
499 }
500 
501 /* The 'qsize' entry dumps current Aura/Pool context Qsize
502  * and each context's current enable/disable status in a bitmap.
503  */
rvu_dbg_qsize_display(struct seq_file * filp,void * unsused,int blktype)504 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
505 				 int blktype)
506 {
507 	void (*print_qsize)(struct seq_file *filp,
508 			    struct rvu_pfvf *pfvf) = NULL;
509 	struct dentry *current_dir;
510 	struct rvu_pfvf *pfvf;
511 	struct rvu *rvu;
512 	int qsize_id;
513 	u16 pcifunc;
514 	int blkaddr;
515 
516 	rvu = filp->private;
517 	switch (blktype) {
518 	case BLKTYPE_NPA:
519 		qsize_id = rvu->rvu_dbg.npa_qsize_id;
520 		print_qsize = print_npa_qsize;
521 		break;
522 
523 	case BLKTYPE_NIX:
524 		qsize_id = rvu->rvu_dbg.nix_qsize_id;
525 		print_qsize = print_nix_qsize;
526 		break;
527 
528 	default:
529 		return -EINVAL;
530 	}
531 
532 	if (blktype == BLKTYPE_NPA) {
533 		blkaddr = BLKADDR_NPA;
534 	} else {
535 		current_dir = filp->file->f_path.dentry->d_parent;
536 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
537 				   BLKADDR_NIX1 : BLKADDR_NIX0);
538 	}
539 
540 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
541 		return -EINVAL;
542 
543 	pfvf = rvu_get_pfvf(rvu, pcifunc);
544 	print_qsize(filp, pfvf);
545 
546 	return 0;
547 }
548 
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)549 static ssize_t rvu_dbg_qsize_write(struct file *filp,
550 				   const char __user *buffer, size_t count,
551 				   loff_t *ppos, int blktype)
552 {
553 	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
554 	struct seq_file *seqfile = filp->private_data;
555 	char *cmd_buf, *cmd_buf_tmp, *subtoken;
556 	struct rvu *rvu = seqfile->private;
557 	struct dentry *current_dir;
558 	int blkaddr;
559 	u16 pcifunc;
560 	int ret, lf;
561 
562 	cmd_buf = memdup_user(buffer, count + 1);
563 	if (IS_ERR(cmd_buf))
564 		return -ENOMEM;
565 
566 	cmd_buf[count] = '\0';
567 
568 	cmd_buf_tmp = strchr(cmd_buf, '\n');
569 	if (cmd_buf_tmp) {
570 		*cmd_buf_tmp = '\0';
571 		count = cmd_buf_tmp - cmd_buf + 1;
572 	}
573 
574 	cmd_buf_tmp = cmd_buf;
575 	subtoken = strsep(&cmd_buf, " ");
576 	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
577 	if (cmd_buf)
578 		ret = -EINVAL;
579 
580 	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
581 		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
582 		goto qsize_write_done;
583 	}
584 
585 	if (blktype == BLKTYPE_NPA) {
586 		blkaddr = BLKADDR_NPA;
587 	} else {
588 		current_dir = filp->f_path.dentry->d_parent;
589 		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
590 				   BLKADDR_NIX1 : BLKADDR_NIX0);
591 	}
592 
593 	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
594 		ret = -EINVAL;
595 		goto qsize_write_done;
596 	}
597 	if (blktype  == BLKTYPE_NPA)
598 		rvu->rvu_dbg.npa_qsize_id = lf;
599 	else
600 		rvu->rvu_dbg.nix_qsize_id = lf;
601 
602 qsize_write_done:
603 	kfree(cmd_buf_tmp);
604 	return ret ? ret : count;
605 }
606 
/* debugfs write: select which NPA LF the "qsize" file reports on */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
					    BLKTYPE_NPA);
}
614 
/* debugfs read: show qsize info for the selected NPA LF */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}
619 
620 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
621 
/* Dumps given NPA Aura's context. Each output line is prefixed with
 * the context word (W0-W6) the field belongs to.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be is not printed on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is not printed on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
669 
/* Dumps given NPA Pool's context. Each output line is prefixed with
 * the context word (W0-W8) the field belongs to.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is not printed on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is not printed on OcteonTx2 silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
716 
/* Reads aura/pool's ctx from admin queue and dumps it via seq_file.
 * Which LF and which context id(s) are shown was previously selected
 * through the matching debugfs write handler (see write_npa_ctx()).
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the stored LF/id/all selection for this context type */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build a READ instruction for the NPA admin queue */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context from 0; otherwise just the one id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
797 
/* Validate and record which NPA LF and aura/pool id (or all of them)
 * the corresponding ctx debugfs read should dump. Returns 0 on
 * success or -EINVAL for a bad LF, id or context type.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
		break;
	case NPA_AQ_CTYPE_POOL:
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
		break;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
848 
/* Parse a "<npalf> [<id>|all]" command written to a ctx debugfs file
 * into *@npalf, *@id and *@all. @cmd_buf must hold at least
 * *@count + 1 bytes; *@count is trimmed at the first newline.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *nl, *token;
	int rc;

	if (copy_from_user(cmd_buf, buffer, *count))
		return -EFAULT;

	cmd_buf[*count] = '\0';

	/* Trim the command at the first newline, if any */
	nl = strchr(cmd_buf, '\n');
	if (nl) {
		*nl = '\0';
		*count = nl - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	token = strsep(&cmd_buf, " ");
	rc = token ? kstrtoint(token, 10, npalf) : -EINVAL;
	if (rc < 0)
		return rc;

	/* Second token: either "all" or a specific aura/pool id */
	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		rc = token ? kstrtoint(token, 10, id) : -EINVAL;
		if (rc < 0)
			return rc;
	}

	/* Anything left over is a usage error */
	if (cmd_buf)
		return -EINVAL;
	return rc;
}
886 
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)887 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
888 				     const char __user *buffer,
889 				     size_t count, loff_t *ppos, int ctype)
890 {
891 	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
892 					"aura" : "pool";
893 	struct seq_file *seqfp = filp->private_data;
894 	struct rvu *rvu = seqfp->private;
895 	int npalf, id = 0, ret;
896 	bool all = false;
897 
898 	if ((*ppos != 0) || !count)
899 		return -EINVAL;
900 
901 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
902 	if (!cmd_buf)
903 		return count;
904 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
905 				   &npalf, &id, &all);
906 	if (ret < 0) {
907 		dev_info(rvu->dev,
908 			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
909 			 ctype_string, ctype_string);
910 		goto done;
911 	} else {
912 		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
913 	}
914 done:
915 	kfree(cmd_buf);
916 	return ret ? ret : count;
917 }
918 
/* debugfs write: select the aura context(s) the aura_ctx file dumps */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}
926 
/* debugfs read: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}
931 
932 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
933 
/* debugfs write: select the pool context(s) the pool_ctx file dumps */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}
941 
/* debugfs read: dump the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}
946 
947 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
948 
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)949 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
950 			    int ctype, int transaction)
951 {
952 	u64 req, out_req, lat, cant_alloc;
953 	struct nix_hw *nix_hw;
954 	struct rvu *rvu;
955 	int port;
956 
957 	if (blk_addr == BLKADDR_NDC_NPA0) {
958 		rvu = s->private;
959 	} else {
960 		nix_hw = s->private;
961 		rvu = nix_hw->rvu;
962 	}
963 
964 	for (port = 0; port < NDC_MAX_PORT; port++) {
965 		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
966 						(port, ctype, transaction));
967 		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
968 						(port, ctype, transaction));
969 		out_req = rvu_read64(rvu, blk_addr,
970 				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
971 				     (port, ctype, transaction));
972 		cant_alloc = rvu_read64(rvu, blk_addr,
973 					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
974 					(port, transaction));
975 		seq_printf(s, "\nPort:%d\n", port);
976 		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
977 		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
978 		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
979 		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
980 		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
981 	}
982 }
983 
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)984 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
985 {
986 	seq_puts(s, "\n***** CACHE mode read stats *****\n");
987 	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
988 	seq_puts(s, "\n***** CACHE mode write stats *****\n");
989 	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
990 	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
991 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
992 	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
993 	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
994 	return 0;
995 }
996 
/* Seq-file show handler for "npa_ndc_cache": NPA NDC transaction stats */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

/* Read-only file: no write handler */
RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1003 
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1004 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1005 {
1006 	struct nix_hw *nix_hw;
1007 	struct rvu *rvu;
1008 	int bank, max_bank;
1009 	u64 ndc_af_const;
1010 
1011 	if (blk_addr == BLKADDR_NDC_NPA0) {
1012 		rvu = s->private;
1013 	} else {
1014 		nix_hw = s->private;
1015 		rvu = nix_hw->rvu;
1016 	}
1017 
1018 	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1019 	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1020 	for (bank = 0; bank < max_bank; bank++) {
1021 		seq_printf(s, "BANK:%d\n", bank);
1022 		seq_printf(s, "\tHits:\t%lld\n",
1023 			   (u64)rvu_read64(rvu, blk_addr,
1024 			   NDC_AF_BANKX_HIT_PC(bank)));
1025 		seq_printf(s, "\tMiss:\t%lld\n",
1026 			   (u64)rvu_read64(rvu, blk_addr,
1027 			    NDC_AF_BANKX_MISS_PC(bank)));
1028 	}
1029 	return 0;
1030 }
1031 
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1032 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1033 {
1034 	struct nix_hw *nix_hw = filp->private;
1035 	int blkaddr = 0;
1036 	int ndc_idx = 0;
1037 
1038 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1039 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1040 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1041 
1042 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1043 }
1044 
1045 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1046 
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1047 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1048 {
1049 	struct nix_hw *nix_hw = filp->private;
1050 	int blkaddr = 0;
1051 	int ndc_idx = 0;
1052 
1053 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1054 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1055 	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1056 
1057 	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1058 }
1059 
1060 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1061 
/* Seq-file show handler for "npa_ndc_hits_miss": NPA NDC bank hit/miss */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

/* Read-only file: no write handler */
RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1069 
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1070 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1071 						void *unused)
1072 {
1073 	struct nix_hw *nix_hw = filp->private;
1074 	int ndc_idx = NPA0_U;
1075 	int blkaddr = 0;
1076 
1077 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1078 		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1079 
1080 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1081 }
1082 
1083 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1084 
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1085 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1086 						void *unused)
1087 {
1088 	struct nix_hw *nix_hw = filp->private;
1089 	int ndc_idx = NPA0_U;
1090 	int blkaddr = 0;
1091 
1092 	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1093 		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1094 
1095 	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1096 }
1097 
1098 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1099 
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1100 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1101 				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1102 {
1103 	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1104 		   sq_ctx->ena, sq_ctx->qint_idx);
1105 	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1106 		   sq_ctx->substream, sq_ctx->sdp_mcast);
1107 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1108 		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1109 
1110 	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1111 		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1112 	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1113 		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1114 	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1115 		   sq_ctx->default_chan, sq_ctx->sqb_count);
1116 
1117 	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1118 	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1119 	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1120 		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1121 	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1122 		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1123 
1124 	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1125 		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1126 	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1127 		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1128 	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1129 		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1130 	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1131 		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1132 	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1133 		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1134 
1135 	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1136 		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1137 	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1138 	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1139 	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1140 	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1141 		   sq_ctx->smenq_next_sqb);
1142 
1143 	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1144 
1145 	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1146 	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1147 		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1148 	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1149 		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1150 	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1151 		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1152 
1153 	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1154 		   (u64)sq_ctx->scm_lso_rem);
1155 	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1156 	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1157 	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1158 		   (u64)sq_ctx->dropped_octs);
1159 	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1160 		   (u64)sq_ctx->dropped_pkts);
1161 }
1162 
/* Dumps given nix_sq's context to the seq_file, one "Wn: field value"
 * line per context field.  On CN10K-family silicon the SQ context layout
 * differs, so the response buffer is reinterpreted and handed to the
 * CN10K-specific printer instead.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTx2 (i.e. CN10K) uses nix_cn10k_sq_ctx_s layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4-W8 hold 64-bit SQB pointers, printed as raw hex */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* W10-W15: 48-bit statistics counters, widened to u64 for %llu */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1232 
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)1233 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1234 				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1235 {
1236 	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1237 		   rq_ctx->ena, rq_ctx->sso_ena);
1238 	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1239 		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1240 	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1241 		   rq_ctx->cq, rq_ctx->lenerr_dis);
1242 	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1243 		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1244 	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1245 		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1246 	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1247 		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1248 	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1249 
1250 	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1251 		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1252 	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1253 	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1254 		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1255 	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1256 		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1257 	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1258 		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1259 	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1260 		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1261 	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1262 		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1263 
1264 	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1265 	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1266 	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1267 	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1268 		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1269 	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1270 		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1271 	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1272 		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1273 	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1274 		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1275 
1276 	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1277 		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1278 	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1279 		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1280 	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1281 		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1282 	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1283 		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1284 
1285 	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1286 		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1287 	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1288 		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1289 	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1290 		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1291 	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1292 
1293 	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1294 		   rq_ctx->ltag, rq_ctx->good_utag);
1295 	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1296 		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1297 	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1298 		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1299 	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1300 		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1301 	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1302 
1303 	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1304 	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1305 	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1306 	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1307 	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1308 }
1309 
/* Dumps given nix_rq's context to the seq_file, one "Wn: field value"
 * line per context field.  On CN10K-family silicon the RQ context layout
 * differs, so the response buffer is reinterpreted and handed to the
 * CN10K-specific printer instead.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* Non-OcteonTx2 (i.e. CN10K) uses nix_cn10k_rq_ctx_s layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6-W10: 48-bit statistics counters, widened to u64 for %llu */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1378 
1379 /* Dumps given nix_cq's context */
print_nix_cq_ctx(struct seq_file * m,struct nix_aq_enq_rsp * rsp)1380 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1381 {
1382 	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1383 
1384 	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1385 
1386 	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1387 	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1388 		   cq_ctx->avg_con, cq_ctx->cint_idx);
1389 	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1390 		   cq_ctx->cq_err, cq_ctx->qint_idx);
1391 	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1392 		   cq_ctx->bpid, cq_ctx->bp_ena);
1393 
1394 	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1395 		   cq_ctx->update_time, cq_ctx->avg_level);
1396 	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1397 		   cq_ctx->head, cq_ctx->tail);
1398 
1399 	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1400 		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1401 	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1402 		   cq_ctx->qsize, cq_ctx->caching);
1403 	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1404 		   cq_ctx->substream, cq_ctx->ena);
1405 	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1406 		   cq_ctx->drop_ena, cq_ctx->drop);
1407 	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1408 }
1409 
/* Common show handler for the nix_{sq,rq,cq}_ctx debugfs files.
 * Reads the previously-written selection (LF, queue id, or "all") from
 * rvu->rvu_dbg, fetches each selected queue context via the NIX admin
 * queue mailbox and pretty-prints it with the ctype-specific printer.
 * Returns 0 on success or -EINVAL on bad ctype/LF or mailbox failure.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Pick up the selection stored by write_nix_queue_ctx() */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Select the per-ctype printer and the valid queue id range */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps every queue [0, qsize); otherwise just queue 'id' */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1498 
/* Validate and record a queue-context selection written to one of the
 * nix_{sq,rq,cq}_ctx debugfs files.  Checks that the LF is valid, that
 * the corresponding context memory exists, and that 'id' is within the
 * queue's size; on success stores {lf, id, all} in rvu->rvu_dbg for the
 * matching show handler to consume.  Returns 0 or -EINVAL.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	/* Queue id must fall inside [0, qsize) */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Persist the selection for the seq_file show handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1561 
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1562 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1563 					   const char __user *buffer,
1564 					   size_t count, loff_t *ppos,
1565 					   int ctype)
1566 {
1567 	struct seq_file *m = filp->private_data;
1568 	struct nix_hw *nix_hw = m->private;
1569 	struct rvu *rvu = nix_hw->rvu;
1570 	char *cmd_buf, *ctype_string;
1571 	int nixlf, id = 0, ret;
1572 	bool all = false;
1573 
1574 	if ((*ppos != 0) || !count)
1575 		return -EINVAL;
1576 
1577 	switch (ctype) {
1578 	case NIX_AQ_CTYPE_SQ:
1579 		ctype_string = "sq";
1580 		break;
1581 	case NIX_AQ_CTYPE_RQ:
1582 		ctype_string = "rq";
1583 		break;
1584 	case NIX_AQ_CTYPE_CQ:
1585 		ctype_string = "cq";
1586 		break;
1587 	default:
1588 		return -EINVAL;
1589 	}
1590 
1591 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1592 
1593 	if (!cmd_buf)
1594 		return count;
1595 
1596 	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1597 				   &nixlf, &id, &all);
1598 	if (ret < 0) {
1599 		dev_info(rvu->dev,
1600 			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1601 			 ctype_string, ctype_string);
1602 		goto done;
1603 	} else {
1604 		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1605 					  ctype_string, m);
1606 	}
1607 done:
1608 	kfree(cmd_buf);
1609 	return ret ? ret : count;
1610 }
1611 
/* Write handler for "nix_sq_ctx": select which SQ context(s) to dump */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_SQ);
}

/* Seq-file show handler for "nix_sq_ctx": dump the selected SQ ctx */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1626 
/* Write handler for "nix_rq_ctx": select which RQ context(s) to dump */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_RQ);
}

/* Seq-file show handler for "nix_rq_ctx": dump the selected RQ ctx */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1641 
/* Write handler for "nix_cq_ctx": select which CQ context(s) to dump */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					    NIX_AQ_CTYPE_CQ);
}

/* Seq-file show handler for "nix_cq_ctx": dump the selected CQ ctx */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1656 
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)1657 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1658 				 unsigned long *bmap, char *qtype)
1659 {
1660 	char *buf;
1661 
1662 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1663 	if (!buf)
1664 		return;
1665 
1666 	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1667 	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1668 	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1669 		   qtype, buf);
1670 	kfree(buf);
1671 }
1672 
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)1673 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1674 {
1675 	if (!pfvf->cq_ctx)
1676 		seq_puts(filp, "cq context is not initialized\n");
1677 	else
1678 		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1679 				     "cq");
1680 
1681 	if (!pfvf->rq_ctx)
1682 		seq_puts(filp, "rq context is not initialized\n");
1683 	else
1684 		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1685 				     "rq");
1686 
1687 	if (!pfvf->sq_ctx)
1688 		seq_puts(filp, "sq context is not initialized\n");
1689 	else
1690 		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1691 				     "sq");
1692 }
1693 
/* Write handler for "nix_qsize": select which NIX LF's qsizes to show */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* Seq-file show handler for "nix_qsize": dump the selected LF's qsizes */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1708 
print_band_prof_ctx(struct seq_file * m,struct nix_bandprof_s * prof)1709 static void print_band_prof_ctx(struct seq_file *m,
1710 				struct nix_bandprof_s *prof)
1711 {
1712 	char *str;
1713 
1714 	switch (prof->pc_mode) {
1715 	case NIX_RX_PC_MODE_VLAN:
1716 		str = "VLAN";
1717 		break;
1718 	case NIX_RX_PC_MODE_DSCP:
1719 		str = "DSCP";
1720 		break;
1721 	case NIX_RX_PC_MODE_GEN:
1722 		str = "Generic";
1723 		break;
1724 	case NIX_RX_PC_MODE_RSVD:
1725 		str = "Reserved";
1726 		break;
1727 	}
1728 	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1729 	str = (prof->icolor == 3) ? "Color blind" :
1730 		(prof->icolor == 0) ? "Green" :
1731 		(prof->icolor == 1) ? "Yellow" : "Red";
1732 	seq_printf(m, "W0: icolor\t\t%s\n", str);
1733 	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1734 	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1735 	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1736 	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1737 	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1738 	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1739 	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1740 	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1741 
1742 	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1743 	str = (prof->lmode == 0) ? "byte" : "packet";
1744 	seq_printf(m, "W1: lmode\t\t%s\n", str);
1745 	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1746 	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1747 	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1748 	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1749 	str = (prof->gc_action == 0) ? "PASS" :
1750 		(prof->gc_action == 1) ? "DROP" : "RED";
1751 	seq_printf(m, "W1: gc_action\t\t%s\n", str);
1752 	str = (prof->yc_action == 0) ? "PASS" :
1753 		(prof->yc_action == 1) ? "DROP" : "RED";
1754 	seq_printf(m, "W1: yc_action\t\t%s\n", str);
1755 	str = (prof->rc_action == 0) ? "PASS" :
1756 		(prof->rc_action == 1) ? "DROP" : "RED";
1757 	seq_printf(m, "W1: rc_action\t\t%s\n", str);
1758 	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1759 	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1760 	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1761 
1762 	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1763 	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1764 	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1765 	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1766 		   (u64)prof->green_pkt_pass);
1767 	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1768 		   (u64)prof->yellow_pkt_pass);
1769 	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1770 	seq_printf(m, "W7: green_octs_pass\t%lld\n",
1771 		   (u64)prof->green_octs_pass);
1772 	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1773 		   (u64)prof->yellow_octs_pass);
1774 	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1775 	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1776 		   (u64)prof->green_pkt_drop);
1777 	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1778 		   (u64)prof->yellow_pkt_drop);
1779 	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1780 	seq_printf(m, "W13: green_octs_drop\t%lld\n",
1781 		   (u64)prof->green_octs_drop);
1782 	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1783 		   (u64)prof->yellow_octs_drop);
1784 	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1785 	seq_puts(m, "==============================\n");
1786 }
1787 
/* debugfs: dump the context of every allocated ingress bandwidth
 * profile, per policer layer (Leaf/Mid/Top), read via the NIX AQ.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only allocated profiles have valid context */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index encodes the layer in bits [15:14] */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				/* Stop dumping; partial output already shown */
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			/* FUNC bits clear => owned by a PF, else VF (id + 1) */
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}
1842 
1843 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1844 
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)1845 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1846 {
1847 	struct nix_hw *nix_hw = m->private;
1848 	struct nix_ipolicer *ipolicer;
1849 	int layer;
1850 	char *str;
1851 
1852 	/* Ingress policers do not exist on all platforms */
1853 	if (!nix_hw->ipolicer)
1854 		return 0;
1855 
1856 	seq_puts(m, "\nBandwidth profile resource free count\n");
1857 	seq_puts(m, "=====================================\n");
1858 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1859 		if (layer == BAND_PROF_INVAL_LAYER)
1860 			continue;
1861 		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1862 			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1863 
1864 		ipolicer = &nix_hw->ipolicer[layer];
1865 		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1866 			   ipolicer->band_prof.max,
1867 			   rvu_rsrc_free_count(&ipolicer->band_prof));
1868 	}
1869 	seq_puts(m, "=====================================\n");
1870 
1871 	return 0;
1872 }
1873 
1874 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1875 
/* Create the NIX debugfs directory ("nix" for NIX0, "nix1" for NIX1)
 * and populate it with the context, NDC cache, qsize and ingress
 * policer entries.
 */
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
	struct nix_hw *nix_hw;

	/* Nothing to expose if this NIX block is absent in hardware */
	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_NIX0) {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[0];
	} else {
		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
						      rvu->rvu_dbg.root);
		nix_hw = &rvu->hw->nix[1];
	}

	/* Note: "qsize" takes 'rvu' as private data; the rest take 'nix_hw' */
	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_sq_ctx_fops);
	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_rq_ctx_fops);
	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_cq_ctx_fops);
	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_cache_fops);
	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_cache_fops);
	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
			    &rvu_dbg_nix_qsize_fops);
	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_ctx_fops);
	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
			    &rvu_dbg_nix_band_prof_rsrc_fops);
}
1913 
/* Create the NPA debugfs directory and its qsize/context/NDC entries.
 * All files take 'rvu' as private data.
 */
static void rvu_dbg_npa_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);

	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_aura_ctx_fops);
	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_pool_ctx_fops);
	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_cache_fops);
	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_ndc_hits_miss_fops);
}
1929 
/* Read the cumulative NIX RX stat 'idx' for (cgxd, lmac_id), print it
 * as "name: value" on success, and evaluate to the counter value.
 * Side effect: sets 'err' in the enclosing scope.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					     NIX_STATS_RX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1939 
/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS: reads a cumulative
 * NIX TX stat, prints it on success, evaluates to the counter value,
 * and sets 'err' in the enclosing scope.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
	({								\
		u64 cnt;						\
		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
					  NIX_STATS_TX, &(cnt));	\
		if (!err)						\
			seq_printf(s, "%s: %llu\n", name, cnt);		\
		cnt;							\
	})
1949 
cgx_print_stats(struct seq_file * s,int lmac_id)1950 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1951 {
1952 	struct cgx_link_user_info linfo;
1953 	struct mac_ops *mac_ops;
1954 	void *cgxd = s->private;
1955 	u64 ucast, mcast, bcast;
1956 	int stat = 0, err = 0;
1957 	u64 tx_stat, rx_stat;
1958 	struct rvu *rvu;
1959 
1960 	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1961 					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1962 	if (!rvu)
1963 		return -ENODEV;
1964 
1965 	mac_ops = get_mac_ops(cgxd);
1966 
1967 	if (!mac_ops)
1968 		return 0;
1969 
1970 	/* Link status */
1971 	seq_puts(s, "\n=======Link Status======\n\n");
1972 	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1973 	if (err)
1974 		seq_puts(s, "Failed to read link status\n");
1975 	seq_printf(s, "\nLink is %s %d Mbps\n\n",
1976 		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
1977 
1978 	/* Rx stats */
1979 	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1980 		   mac_ops->name);
1981 	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1982 	if (err)
1983 		return err;
1984 	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1985 	if (err)
1986 		return err;
1987 	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1988 	if (err)
1989 		return err;
1990 	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1991 	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1992 	if (err)
1993 		return err;
1994 	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1995 	if (err)
1996 		return err;
1997 	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1998 	if (err)
1999 		return err;
2000 
2001 	/* Tx stats */
2002 	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2003 		   mac_ops->name);
2004 	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2005 	if (err)
2006 		return err;
2007 	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2008 	if (err)
2009 		return err;
2010 	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2011 	if (err)
2012 		return err;
2013 	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2014 	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2015 	if (err)
2016 		return err;
2017 	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2018 	if (err)
2019 		return err;
2020 
2021 	/* Rx stats */
2022 	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2023 	while (stat < mac_ops->rx_stats_cnt) {
2024 		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2025 		if (err)
2026 			return err;
2027 		if (is_rvu_otx2(rvu))
2028 			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2029 				   rx_stat);
2030 		else
2031 			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2032 				   rx_stat);
2033 		stat++;
2034 	}
2035 
2036 	/* Tx stats */
2037 	stat = 0;
2038 	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2039 	while (stat < mac_ops->tx_stats_cnt) {
2040 		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2041 		if (err)
2042 			return err;
2043 
2044 	if (is_rvu_otx2(rvu))
2045 		seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2046 			   tx_stat);
2047 	else
2048 		seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2049 			   tx_stat);
2050 	stat++;
2051 	}
2052 
2053 	return err;
2054 }
2055 
rvu_dbg_derive_lmacid(struct seq_file * filp,int * lmac_id)2056 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2057 {
2058 	struct dentry *current_dir;
2059 	char *buf;
2060 
2061 	current_dir = filp->file->f_path.dentry->d_parent;
2062 	buf = strrchr(current_dir->d_name.name, 'c');
2063 	if (!buf)
2064 		return -EINVAL;
2065 
2066 	return kstrtoint(buf + 1, 10, lmac_id);
2067 }
2068 
/* debugfs read handler: resolve the LMAC id from the directory name,
 * then print its statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2079 
2080 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2081 
/* debugfs: show the DMAC filter setup for one LMAC — broadcast and
 * multicast accept modes plus every enabled DMAC CAM entry bound to
 * this LMAC.
 */
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
	struct pci_dev *pdev = NULL;
	void *cgxd = s->private;
	char *bcast, *mcast;
	u16 index, domain;
	u8 dmac[ETH_ALEN];
	struct rvu *rvu;
	u64 cfg, mac;
	int pf;

	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
	if (!rvu)
		return -ENODEV;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	/* NOTE(review): PCI domain hard-coded to 2 — presumably the domain
	 * hosting the RVU PF devices on this platform; confirm for others.
	 */
	domain = 2;

	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
	if (!pdev)
		return 0;

	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";

	seq_puts(s,
		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
	seq_printf(s, "%s  PF%d  %9s  %9s",
		   dev_name(&pdev->dev), pf, bcast, mcast);
	if (cfg & CGX_DMAC_CAM_ACCEPT)
		seq_printf(s, "%12s\n\n", "UNICAST");
	else
		seq_printf(s, "%16s\n\n", "PROMISCUOUS");

	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");

	for (index = 0 ; index < 32 ; index++) {
		cfg = cgx_read_dmac_entry(cgxd, index);
		/* Display enabled dmac entries associated with current lmac */
		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
			u64_to_ether_addr(mac, dmac);
			seq_printf(s, "%7d     %pM\n", index, dmac);
		}
	}

	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}
2134 
/* debugfs read handler: resolve the LMAC id from the directory name,
 * then print its DMAC filter configuration.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int err, lmac_id;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2145 
2146 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2147 
/* Create the per-CGX/per-LMAC debugfs hierarchy:
 * <root>/<mac>/<mac><N>/lmac<M>/{stats,mac_filter}
 */
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	unsigned long lmac_bmap;
	int i, lmac_id;
	char dname[20];
	void *cgx;

	if (!cgx_get_cgxcnt_max())
		return;

	/* mac_ops->name identifies the MAC type ("cgx" or "rpm") and is
	 * reused as the directory prefix below.
	 */
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	if (!mac_ops)
		return;

	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
						   rvu->rvu_dbg.root);

	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
		cgx = rvu_cgx_pdata(i, rvu);
		if (!cgx)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgx);
		/* cgx debugfs dir */
		sprintf(dname, "%s%d", mac_ops->name, i);
		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
						      rvu->rvu_dbg.cgx_root);

		for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
			/* lmac debugfs dir */
			sprintf(dname, "lmac%d", lmac_id);
			rvu->rvu_dbg.lmac =
				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);

			/* Both files take the cgx device as private data */
			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
					    cgx, &rvu_dbg_cgx_stat_fops);
			debugfs_create_file("mac_filter", 0600,
					    rvu->rvu_dbg.lmac, cgx,
					    &rvu_dbg_cgx_dmac_flt_fops);
		}
	}
}
2190 
2191 /* NPC debugfs APIs */
/* Print MCAM entry/counter allocated and enabled counts for one PF or
 * VF; prints nothing when the device has no allocations.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entry_acnt, entry_ecnt;
	int cntr_acnt, cntr_ecnt;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entry_acnt, &entry_ecnt);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntr_acnt, &cntr_ecnt);
	/* Skip devices that own neither entries nor counters */
	if (!entry_acnt && !cntr_acnt)
		return;

	/* FUNC bits clear => a PF, else VF number is (FUNC - 1) */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);

	if (entry_acnt) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
	}
	if (cntr_acnt) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
	}
}
2223 
rvu_dbg_npc_mcam_info_display(struct seq_file * filp,void * unsued)2224 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2225 {
2226 	struct rvu *rvu = filp->private;
2227 	int pf, vf, numvfs, blkaddr;
2228 	struct npc_mcam *mcam;
2229 	u16 pcifunc, counters;
2230 	u64 cfg;
2231 
2232 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2233 	if (blkaddr < 0)
2234 		return -ENODEV;
2235 
2236 	mcam = &rvu->hw->mcam;
2237 	counters = rvu->hw->npc_counters;
2238 
2239 	seq_puts(filp, "\nNPC MCAM info:\n");
2240 	/* MCAM keywidth on receive and transmit sides */
2241 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2242 	cfg = (cfg >> 32) & 0x07;
2243 	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2244 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2245 		   "224bits" : "448bits"));
2246 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2247 	cfg = (cfg >> 32) & 0x07;
2248 	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2249 		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2250 		   "224bits" : "448bits"));
2251 
2252 	mutex_lock(&mcam->lock);
2253 	/* MCAM entries */
2254 	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2255 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2256 		   mcam->total_entries - mcam->bmap_entries);
2257 	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2258 
2259 	/* MCAM counters */
2260 	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2261 	seq_printf(filp, "\t\t Reserved \t: %d\n",
2262 		   counters - mcam->counters.max);
2263 	seq_printf(filp, "\t\t Available \t: %d\n",
2264 		   rvu_rsrc_free_count(&mcam->counters));
2265 
2266 	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2267 		mutex_unlock(&mcam->lock);
2268 		return 0;
2269 	}
2270 
2271 	seq_puts(filp, "\n\t\t Current allocation\n");
2272 	seq_puts(filp, "\t\t====================\n");
2273 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2274 		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2275 		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2276 
2277 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2278 		numvfs = (cfg >> 12) & 0xFF;
2279 		for (vf = 0; vf < numvfs; vf++) {
2280 			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2281 			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2282 		}
2283 	}
2284 
2285 	mutex_unlock(&mcam->lock);
2286 	return 0;
2287 }
2288 
2289 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2290 
/* debugfs: show the match-stat counter that tracks RX MCAM miss
 * action hits.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
					     void *unused)
{
	struct rvu *rvu = filp->private;
	struct npc_mcam *mcam;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;

	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
		   rvu_read64(rvu, blkaddr,
			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));

	return 0;
}
2311 
2312 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2313 
/* Print each match field (value and mask) of an MCAM rule, one line per
 * feature bit set in rule->features. Network-order fields (etype, VLAN
 * TCI, L4 ports) are byte-swapped for display.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field has no printable value representation here */
			seq_puts(s, "\n");
			break;
		}
	}
}
2377 
/* Print the action of an MCAM rule, choosing the TX or RX action set
 * based on the rule's interface. Unknown action ops print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2425 
/* Map an NPC interface id to a human-readable name. */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2443 
/* debugfs: walk the installed MCAM rule list (under mcam->lock) and
 * print each rule's owner, direction, match fields, action, enable
 * state and, when present, its hit counter.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		/* Non-zero FUNC bits mean the owner is a VF (id = FUNC - 1) */
		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target PF/VF */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}
2507 
2508 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2509 
/* Create the NPC debugfs directory with read-only MCAM info, rule and
 * RX-miss stat entries; all files take 'rvu' as private data.
 */
static void rvu_dbg_npc_init(struct rvu *rvu)
{
	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);

	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_mcam_info_fops);
	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_mcam_rules_fops);
	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
			    &rvu_dbg_npc_rx_miss_act_fops);
}
2521 
/* Print FREE/BUSY status bitmaps for one CPT engine type. Per
 * CPT_AF_CONSTANTS1 the global engine numbering is SEs first
 * [0, max_ses), then IEs, then AEs; the type selects a sub-range.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Select the engine-number range for the requested type */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* Bit 0 of EXEX_STS = busy, bit 1 = free (per-engine status) */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2567 
/* debugfs: FREE/BUSY status of CPT asymmetric (AE) engines. */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}
2572 
2573 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2574 
/* debugfs: FREE/BUSY status of CPT symmetric (SE) engines. */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}
2579 
2580 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2581 
/* debugfs: FREE/BUSY status of CPT IPsec (IE) engines. */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}
2586 
2587 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2588 
/* debugfs: dump group-enable, active-info and control registers for
 * every CPT engine (SEs + IEs + AEs).
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u32 e_max, e;
	u64 reg;

	/* Engine counts come from CPT_AF_CONSTANTS1 */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	e_max = max_ses + max_ies + max_aes;

	seq_puts(filp, "===========================================\n");
	for (e = 0; e < e_max; e++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
			   reg & 0xff);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
			   reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
			   reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}
2620 
2621 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2622 
/* debugfs: dump CTL, CTL2, PTR_CTL and block CFG registers for every
 * CPT local function (LF).
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	/* LF bitmap absent => block not initialized, nothing to show */
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
		/* Per-LF config lives at block->lfcfg_reg + (lf << lfshift) */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}
2653 
2654 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2655 
rvu_dbg_cpt_err_info_display(struct seq_file * filp,void * unused)2656 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2657 {
2658 	struct cpt_ctx *ctx = filp->private;
2659 	struct rvu *rvu = ctx->rvu;
2660 	int blkaddr = ctx->blkaddr;
2661 	u64 reg0, reg1;
2662 
2663 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2664 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2665 	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2666 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2667 	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2668 	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2669 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2670 	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2671 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2672 	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2673 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2674 	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2675 	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2676 	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2677 
2678 	return 0;
2679 }
2680 
2681 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2682 
rvu_dbg_cpt_pc_display(struct seq_file * filp,void * unused)2683 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2684 {
2685 	struct cpt_ctx *ctx = filp->private;
2686 	struct rvu *rvu = ctx->rvu;
2687 	int blkaddr = ctx->blkaddr;
2688 	u64 reg;
2689 
2690 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2691 	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2692 	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2693 	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2694 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2695 	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2696 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2697 	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2698 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2699 	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2700 	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2701 	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2702 	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2703 	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2704 
2705 	return 0;
2706 }
2707 
2708 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2709 
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)2710 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2711 {
2712 	struct cpt_ctx *ctx;
2713 
2714 	if (!is_block_implemented(rvu->hw, blkaddr))
2715 		return;
2716 
2717 	if (blkaddr == BLKADDR_CPT0) {
2718 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2719 		ctx = &rvu->rvu_dbg.cpt_ctx[0];
2720 		ctx->blkaddr = BLKADDR_CPT0;
2721 		ctx->rvu = rvu;
2722 	} else {
2723 		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2724 						      rvu->rvu_dbg.root);
2725 		ctx = &rvu->rvu_dbg.cpt_ctx[1];
2726 		ctx->blkaddr = BLKADDR_CPT1;
2727 		ctx->rvu = rvu;
2728 	}
2729 
2730 	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2731 			    &rvu_dbg_cpt_pc_fops);
2732 	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2733 			    &rvu_dbg_cpt_ae_sts_fops);
2734 	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2735 			    &rvu_dbg_cpt_se_sts_fops);
2736 	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2737 			    &rvu_dbg_cpt_ie_sts_fops);
2738 	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2739 			    &rvu_dbg_cpt_engines_info_fops);
2740 	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2741 			    &rvu_dbg_cpt_lfs_info_fops);
2742 	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2743 			    &rvu_dbg_cpt_err_info_fops);
2744 }
2745 
/* Pick the debugfs root directory name based on the silicon family. */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2753 
rvu_dbg_init(struct rvu * rvu)2754 void rvu_dbg_init(struct rvu *rvu)
2755 {
2756 	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2757 
2758 	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2759 			    &rvu_dbg_rsrc_status_fops);
2760 
2761 	if (!cgx_get_cgxcnt_max())
2762 		goto create;
2763 
2764 	if (is_rvu_otx2(rvu))
2765 		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2766 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2767 	else
2768 		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2769 				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2770 
2771 create:
2772 	rvu_dbg_npa_init(rvu);
2773 	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2774 
2775 	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2776 	rvu_dbg_cgx_init(rvu);
2777 	rvu_dbg_npc_init(rvu);
2778 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2779 	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2780 }
2781 
/* Remove the entire debugfs tree created by rvu_dbg_init(). */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2786 
2787 #endif /* CONFIG_DEBUG_FS */
2788