1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/seq_file.h>
36 #include <linux/debugfs.h>
37 #include <linux/string_helpers.h>
38 #include <linux/sort.h>
39 #include <linux/ctype.h>
40 
41 #include "cxgb4.h"
42 #include "t4_regs.h"
43 #include "t4_values.h"
44 #include "t4fw_api.h"
45 #include "cxgb4_debugfs.h"
46 #include "clip_tbl.h"
47 #include "l2t.h"
48 #include "cudbg_if.h"
49 #include "cudbg_lib_common.h"
50 #include "cudbg_entity.h"
51 #include "cudbg_lib.h"
52 #include "cxgb4_tc_mqprio.h"
53 
54 /* generic seq_file support for showing a table of size rows x width. */
55 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
56 {
57 	pos -= tb->skip_first;
58 	return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
59 }
60 
61 static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
62 {
63 	struct seq_tab *tb = seq->private;
64 
65 	if (tb->skip_first && *pos == 0)
66 		return SEQ_START_TOKEN;
67 
68 	return seq_tab_get_idx(tb, *pos);
69 }
70 
71 static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
72 {
73 	v = seq_tab_get_idx(seq->private, *pos + 1);
74 	++(*pos);
75 	return v;
76 }
77 
78 static void seq_tab_stop(struct seq_file *seq, void *v)
79 {
80 }
81 
82 static int seq_tab_show(struct seq_file *seq, void *v)
83 {
84 	const struct seq_tab *tb = seq->private;
85 
86 	return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
87 }
88 
89 static const struct seq_operations seq_tab_ops = {
90 	.start = seq_tab_start,
91 	.next  = seq_tab_next,
92 	.stop  = seq_tab_stop,
93 	.show  = seq_tab_show
94 };
95 
96 struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
97 			     unsigned int width, unsigned int have_header,
98 			     int (*show)(struct seq_file *seq, void *v, int i))
99 {
100 	struct seq_tab *p;
101 
102 	p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
103 	if (p) {
104 		p->show = show;
105 		p->rows = rows;
106 		p->width = width;
107 		p->skip_first = have_header != 0;
108 	}
109 	return p;
110 }
111 
112 /* Trim the size of a seq_tab to the supplied number of rows.  The operation is
113  * irreversible.
114  */
115 static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
116 {
117 	if (new_rows > p->rows)
118 		return -EINVAL;
119 	p->rows = new_rows;
120 	return 0;
121 }
122 
123 static int cim_la_show(struct seq_file *seq, void *v, int idx)
124 {
125 	if (v == SEQ_START_TOKEN)
126 		seq_puts(seq, "Status   Data      PC     LS0Stat  LS0Addr "
127 			 "            LS0Data\n");
128 	else {
129 		const u32 *p = v;
130 
131 		seq_printf(seq,
132 			   "  %02x  %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
133 			   (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
134 			   p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
135 			   p[6], p[7]);
136 	}
137 	return 0;
138 }
139 
140 static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
141 {
142 	if (v == SEQ_START_TOKEN) {
143 		seq_puts(seq, "Status   Data      PC\n");
144 	} else {
145 		const u32 *p = v;
146 
147 		seq_printf(seq, "  %02x   %08x %08x\n", p[5] & 0xff, p[6],
148 			   p[7]);
149 		seq_printf(seq, "  %02x   %02x%06x %02x%06x\n",
150 			   (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
151 			   p[4] & 0xff, p[5] >> 8);
152 		seq_printf(seq, "  %02x   %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
153 			   p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
154 	}
155 	return 0;
156 }
157 
158 static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
159 {
160 	if (v == SEQ_START_TOKEN) {
161 		seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
162 			 "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
163 	} else {
164 		const u32 *p = v;
165 
166 		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
167 			   (p[9] >> 16) & 0xff,       /* Status */
168 			   p[9] & 0xffff, p[8] >> 16, /* Inst */
169 			   p[8] & 0xffff, p[7] >> 16, /* Data */
170 			   p[7] & 0xffff, p[6] >> 16, /* PC */
171 			   p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
172 			   p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
173 	}
174 	return 0;
175 }
176 
177 static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
178 {
179 	if (v == SEQ_START_TOKEN) {
180 		seq_puts(seq, "Status   Inst    Data      PC\n");
181 	} else {
182 		const u32 *p = v;
183 
184 		seq_printf(seq, "  %02x   %08x %08x %08x\n",
185 			   p[3] & 0xff, p[2], p[1], p[0]);
186 		seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
187 			   (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
188 			   p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
189 		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
190 			   (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
191 			   p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
192 			   p[6] >> 16);
193 	}
194 	return 0;
195 }
196 
197 static int cim_la_open(struct inode *inode, struct file *file)
198 {
199 	int ret;
200 	unsigned int cfg;
201 	struct seq_tab *p;
202 	struct adapter *adap = inode->i_private;
203 
204 	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
205 	if (ret)
206 		return ret;
207 
208 	if (is_t6(adap->params.chip)) {
209 		/* +1 to account for integer division of CIMLA_SIZE/10 */
210 		p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
211 				 10 * sizeof(u32), 1,
212 				 cfg & UPDBGLACAPTPCONLY_F ?
213 					cim_la_show_pc_t6 : cim_la_show_t6);
214 	} else {
215 		p = seq_open_tab(file, adap->params.cim_la_size / 8,
216 				 8 * sizeof(u32), 1,
217 				 cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
218 							     cim_la_show);
219 	}
220 	if (!p)
221 		return -ENOMEM;
222 
223 	ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
224 	if (ret)
225 		seq_release_private(inode, file);
226 	return ret;
227 }
228 
229 static const struct file_operations cim_la_fops = {
230 	.owner   = THIS_MODULE,
231 	.open    = cim_la_open,
232 	.read    = seq_read,
233 	.llseek  = seq_lseek,
234 	.release = seq_release_private
235 };
236 
237 static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
238 {
239 	const u32 *p = v;
240 
241 	if (v == SEQ_START_TOKEN) {
242 		seq_puts(seq, "Cntl ID DataBE   Addr                 Data\n");
243 	} else if (idx < CIM_PIFLA_SIZE) {
244 		seq_printf(seq, " %02x  %02x  %04x  %08x %08x%08x%08x%08x\n",
245 			   (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
246 			   p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
247 	} else {
248 		if (idx == CIM_PIFLA_SIZE)
249 			seq_puts(seq, "\nCntl ID               Data\n");
250 		seq_printf(seq, " %02x  %02x %08x%08x%08x%08x\n",
251 			   (p[4] >> 6) & 0xff, p[4] & 0x3f,
252 			   p[3], p[2], p[1], p[0]);
253 	}
254 	return 0;
255 }
256 
257 static int cim_pif_la_open(struct inode *inode, struct file *file)
258 {
259 	struct seq_tab *p;
260 	struct adapter *adap = inode->i_private;
261 
262 	p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
263 			 cim_pif_la_show);
264 	if (!p)
265 		return -ENOMEM;
266 
267 	t4_cim_read_pif_la(adap, (u32 *)p->data,
268 			   (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
269 	return 0;
270 }
271 
272 static const struct file_operations cim_pif_la_fops = {
273 	.owner   = THIS_MODULE,
274 	.open    = cim_pif_la_open,
275 	.read    = seq_read,
276 	.llseek  = seq_lseek,
277 	.release = seq_release_private
278 };
279 
280 static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
281 {
282 	const u32 *p = v;
283 
284 	if (v == SEQ_START_TOKEN) {
285 		seq_puts(seq, "\n");
286 	} else if (idx < CIM_MALA_SIZE) {
287 		seq_printf(seq, "%02x%08x%08x%08x%08x\n",
288 			   p[4], p[3], p[2], p[1], p[0]);
289 	} else {
290 		if (idx == CIM_MALA_SIZE)
291 			seq_puts(seq,
292 				 "\nCnt ID Tag UE       Data       RDY VLD\n");
293 		seq_printf(seq, "%3u %2u  %x   %u %08x%08x  %u   %u\n",
294 			   (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
295 			   (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
296 			   (p[1] >> 2) | ((p[2] & 3) << 30),
297 			   (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
298 			   p[0] & 1);
299 	}
300 	return 0;
301 }
302 
303 static int cim_ma_la_open(struct inode *inode, struct file *file)
304 {
305 	struct seq_tab *p;
306 	struct adapter *adap = inode->i_private;
307 
308 	p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
309 			 cim_ma_la_show);
310 	if (!p)
311 		return -ENOMEM;
312 
313 	t4_cim_read_ma_la(adap, (u32 *)p->data,
314 			  (u32 *)p->data + 5 * CIM_MALA_SIZE);
315 	return 0;
316 }
317 
318 static const struct file_operations cim_ma_la_fops = {
319 	.owner   = THIS_MODULE,
320 	.open    = cim_ma_la_open,
321 	.read    = seq_read,
322 	.llseek  = seq_lseek,
323 	.release = seq_release_private
324 };
325 
326 static int cim_qcfg_show(struct seq_file *seq, void *v)
327 {
328 	static const char * const qname[] = {
329 		"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
330 		"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
331 		"SGE0-RX", "SGE1-RX"
332 	};
333 
334 	int i;
335 	struct adapter *adap = seq->private;
336 	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
337 	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
338 	u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
339 	u16 thres[CIM_NUM_IBQ];
340 	u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
341 	u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
342 	u32 *p = stat;
343 	int cim_num_obq = is_t4(adap->params.chip) ?
344 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
345 
346 	i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
347 			UP_IBQ_0_SHADOW_RDADDR_A,
348 			ARRAY_SIZE(stat), stat);
349 	if (!i) {
350 		if (is_t4(adap->params.chip)) {
351 			i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
352 					ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
353 			wr = obq_wr_t4;
354 		} else {
355 			i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
356 					ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
357 			wr = obq_wr_t5;
358 		}
359 	}
360 	if (i)
361 		return i;
362 
363 	t4_read_cimq_cfg(adap, base, size, thres);
364 
365 	seq_printf(seq,
366 		   "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail\n");
367 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
368 		seq_printf(seq, "%7s %5x %5u %5u %6x  %4x %4u %4u %5u\n",
369 			   qname[i], base[i], size[i], thres[i],
370 			   IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
371 			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
372 			   QUEREMFLITS_G(p[2]) * 16);
373 	for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
374 		seq_printf(seq, "%7s %5x %5u %12x  %4x %4u %4u %5u\n",
375 			   qname[i], base[i], size[i],
376 			   QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
377 			   QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
378 			   QUEREMFLITS_G(p[2]) * 16);
379 	return 0;
380 }
381 DEFINE_SHOW_ATTRIBUTE(cim_qcfg);
382 
383 static int cimq_show(struct seq_file *seq, void *v, int idx)
384 {
385 	const u32 *p = v;
386 
387 	seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
388 		   p[2], p[3]);
389 	return 0;
390 }
391 
392 static int cim_ibq_open(struct inode *inode, struct file *file)
393 {
394 	int ret;
395 	struct seq_tab *p;
396 	unsigned int qid = (uintptr_t)inode->i_private & 7;
397 	struct adapter *adap = inode->i_private - qid;
398 
399 	p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
400 	if (!p)
401 		return -ENOMEM;
402 
403 	ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
404 	if (ret < 0)
405 		seq_release_private(inode, file);
406 	else
407 		ret = 0;
408 	return ret;
409 }
410 
411 static const struct file_operations cim_ibq_fops = {
412 	.owner   = THIS_MODULE,
413 	.open    = cim_ibq_open,
414 	.read    = seq_read,
415 	.llseek  = seq_lseek,
416 	.release = seq_release_private
417 };
418 
419 static int cim_obq_open(struct inode *inode, struct file *file)
420 {
421 	int ret;
422 	struct seq_tab *p;
423 	unsigned int qid = (uintptr_t)inode->i_private & 7;
424 	struct adapter *adap = inode->i_private - qid;
425 
426 	p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
427 	if (!p)
428 		return -ENOMEM;
429 
430 	ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
431 	if (ret < 0) {
432 		seq_release_private(inode, file);
433 	} else {
434 		seq_tab_trim(p, ret / 4);
435 		ret = 0;
436 	}
437 	return ret;
438 }
439 
440 static const struct file_operations cim_obq_fops = {
441 	.owner   = THIS_MODULE,
442 	.open    = cim_obq_open,
443 	.read    = seq_read,
444 	.llseek  = seq_lseek,
445 	.release = seq_release_private
446 };
447 
448 struct field_desc {
449 	const char *name;
450 	unsigned int start;
451 	unsigned int width;
452 };
453 
454 static void field_desc_show(struct seq_file *seq, u64 v,
455 			    const struct field_desc *p)
456 {
457 	char buf[32];
458 	int line_size = 0;
459 
460 	while (p->name) {
461 		u64 mask = (1ULL << p->width) - 1;
462 		int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
463 				    ((unsigned long long)v >> p->start) & mask);
464 
465 		if (line_size + len >= 79) {
466 			line_size = 8;
467 			seq_puts(seq, "\n        ");
468 		}
469 		seq_printf(seq, "%s ", buf);
470 		line_size += len + 1;
471 		p++;
472 	}
473 	seq_putc(seq, '\n');
474 }
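
/* Note: each field_desc names a bit-field inside a 64-bit logic-analyzer
 * word.  For example, { "Tid", 32, 10 } in tp_la0 below selects bits 41:32,
 * i.e. (v >> 32) & ((1ULL << 10) - 1), which field_desc_show() prints as
 * "Tid: <value>", wrapping the output near column 79.
 */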
475 
476 static struct field_desc tp_la0[] = {
477 	{ "RcfOpCodeOut", 60, 4 },
478 	{ "State", 56, 4 },
479 	{ "WcfState", 52, 4 },
480 	{ "RcfOpcSrcOut", 50, 2 },
481 	{ "CRxError", 49, 1 },
482 	{ "ERxError", 48, 1 },
483 	{ "SanityFailed", 47, 1 },
484 	{ "SpuriousMsg", 46, 1 },
485 	{ "FlushInputMsg", 45, 1 },
486 	{ "FlushInputCpl", 44, 1 },
487 	{ "RssUpBit", 43, 1 },
488 	{ "RssFilterHit", 42, 1 },
489 	{ "Tid", 32, 10 },
490 	{ "InitTcb", 31, 1 },
491 	{ "LineNumber", 24, 7 },
492 	{ "Emsg", 23, 1 },
493 	{ "EdataOut", 22, 1 },
494 	{ "Cmsg", 21, 1 },
495 	{ "CdataOut", 20, 1 },
496 	{ "EreadPdu", 19, 1 },
497 	{ "CreadPdu", 18, 1 },
498 	{ "TunnelPkt", 17, 1 },
499 	{ "RcfPeerFin", 16, 1 },
500 	{ "RcfReasonOut", 12, 4 },
501 	{ "TxCchannel", 10, 2 },
502 	{ "RcfTxChannel", 8, 2 },
503 	{ "RxEchannel", 6, 2 },
504 	{ "RcfRxChannel", 5, 1 },
505 	{ "RcfDataOutSrdy", 4, 1 },
506 	{ "RxDvld", 3, 1 },
507 	{ "RxOoDvld", 2, 1 },
508 	{ "RxCongestion", 1, 1 },
509 	{ "TxCongestion", 0, 1 },
510 	{ NULL }
511 };
512 
513 static int tp_la_show(struct seq_file *seq, void *v, int idx)
514 {
515 	const u64 *p = v;
516 
517 	field_desc_show(seq, *p, tp_la0);
518 	return 0;
519 }
520 
521 static int tp_la_show2(struct seq_file *seq, void *v, int idx)
522 {
523 	const u64 *p = v;
524 
525 	if (idx)
526 		seq_putc(seq, '\n');
527 	field_desc_show(seq, p[0], tp_la0);
528 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
529 		field_desc_show(seq, p[1], tp_la0);
530 	return 0;
531 }
532 
533 static int tp_la_show3(struct seq_file *seq, void *v, int idx)
534 {
535 	static struct field_desc tp_la1[] = {
536 		{ "CplCmdIn", 56, 8 },
537 		{ "CplCmdOut", 48, 8 },
538 		{ "ESynOut", 47, 1 },
539 		{ "EAckOut", 46, 1 },
540 		{ "EFinOut", 45, 1 },
541 		{ "ERstOut", 44, 1 },
542 		{ "SynIn", 43, 1 },
543 		{ "AckIn", 42, 1 },
544 		{ "FinIn", 41, 1 },
545 		{ "RstIn", 40, 1 },
546 		{ "DataIn", 39, 1 },
547 		{ "DataInVld", 38, 1 },
548 		{ "PadIn", 37, 1 },
549 		{ "RxBufEmpty", 36, 1 },
550 		{ "RxDdp", 35, 1 },
551 		{ "RxFbCongestion", 34, 1 },
552 		{ "TxFbCongestion", 33, 1 },
553 		{ "TxPktSumSrdy", 32, 1 },
554 		{ "RcfUlpType", 28, 4 },
555 		{ "Eread", 27, 1 },
556 		{ "Ebypass", 26, 1 },
557 		{ "Esave", 25, 1 },
558 		{ "Static0", 24, 1 },
559 		{ "Cread", 23, 1 },
560 		{ "Cbypass", 22, 1 },
561 		{ "Csave", 21, 1 },
562 		{ "CPktOut", 20, 1 },
563 		{ "RxPagePoolFull", 18, 2 },
564 		{ "RxLpbkPkt", 17, 1 },
565 		{ "TxLpbkPkt", 16, 1 },
566 		{ "RxVfValid", 15, 1 },
567 		{ "SynLearned", 14, 1 },
568 		{ "SetDelEntry", 13, 1 },
569 		{ "SetInvEntry", 12, 1 },
570 		{ "CpcmdDvld", 11, 1 },
571 		{ "CpcmdSave", 10, 1 },
572 		{ "RxPstructsFull", 8, 2 },
573 		{ "EpcmdDvld", 7, 1 },
574 		{ "EpcmdFlush", 6, 1 },
575 		{ "EpcmdTrimPrefix", 5, 1 },
576 		{ "EpcmdTrimPostfix", 4, 1 },
577 		{ "ERssIp4Pkt", 3, 1 },
578 		{ "ERssIp6Pkt", 2, 1 },
579 		{ "ERssTcpUdpPkt", 1, 1 },
580 		{ "ERssFceFipPkt", 0, 1 },
581 		{ NULL }
582 	};
583 	static struct field_desc tp_la2[] = {
584 		{ "CplCmdIn", 56, 8 },
585 		{ "MpsVfVld", 55, 1 },
586 		{ "MpsPf", 52, 3 },
587 		{ "MpsVf", 44, 8 },
588 		{ "SynIn", 43, 1 },
589 		{ "AckIn", 42, 1 },
590 		{ "FinIn", 41, 1 },
591 		{ "RstIn", 40, 1 },
592 		{ "DataIn", 39, 1 },
593 		{ "DataInVld", 38, 1 },
594 		{ "PadIn", 37, 1 },
595 		{ "RxBufEmpty", 36, 1 },
596 		{ "RxDdp", 35, 1 },
597 		{ "RxFbCongestion", 34, 1 },
598 		{ "TxFbCongestion", 33, 1 },
599 		{ "TxPktSumSrdy", 32, 1 },
600 		{ "RcfUlpType", 28, 4 },
601 		{ "Eread", 27, 1 },
602 		{ "Ebypass", 26, 1 },
603 		{ "Esave", 25, 1 },
604 		{ "Static0", 24, 1 },
605 		{ "Cread", 23, 1 },
606 		{ "Cbypass", 22, 1 },
607 		{ "Csave", 21, 1 },
608 		{ "CPktOut", 20, 1 },
609 		{ "RxPagePoolFull", 18, 2 },
610 		{ "RxLpbkPkt", 17, 1 },
611 		{ "TxLpbkPkt", 16, 1 },
612 		{ "RxVfValid", 15, 1 },
613 		{ "SynLearned", 14, 1 },
614 		{ "SetDelEntry", 13, 1 },
615 		{ "SetInvEntry", 12, 1 },
616 		{ "CpcmdDvld", 11, 1 },
617 		{ "CpcmdSave", 10, 1 },
618 		{ "RxPstructsFull", 8, 2 },
619 		{ "EpcmdDvld", 7, 1 },
620 		{ "EpcmdFlush", 6, 1 },
621 		{ "EpcmdTrimPrefix", 5, 1 },
622 		{ "EpcmdTrimPostfix", 4, 1 },
623 		{ "ERssIp4Pkt", 3, 1 },
624 		{ "ERssIp6Pkt", 2, 1 },
625 		{ "ERssTcpUdpPkt", 1, 1 },
626 		{ "ERssFceFipPkt", 0, 1 },
627 		{ NULL }
628 	};
629 	const u64 *p = v;
630 
631 	if (idx)
632 		seq_putc(seq, '\n');
633 	field_desc_show(seq, p[0], tp_la0);
634 	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
635 		field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
636 	return 0;
637 }
638 
639 static int tp_la_open(struct inode *inode, struct file *file)
640 {
641 	struct seq_tab *p;
642 	struct adapter *adap = inode->i_private;
643 
644 	switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
645 	case 2:
646 		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
647 				 tp_la_show2);
648 		break;
649 	case 3:
650 		p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
651 				 tp_la_show3);
652 		break;
653 	default:
654 		p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
655 	}
656 	if (!p)
657 		return -ENOMEM;
658 
659 	t4_tp_read_la(adap, (u64 *)p->data, NULL);
660 	return 0;
661 }
662 
663 static ssize_t tp_la_write(struct file *file, const char __user *buf,
664 			   size_t count, loff_t *pos)
665 {
666 	int err;
667 	char s[32];
668 	unsigned long val;
669 	size_t size = min(sizeof(s) - 1, count);
670 	struct adapter *adap = file_inode(file)->i_private;
671 
672 	if (copy_from_user(s, buf, size))
673 		return -EFAULT;
674 	s[size] = '\0';
675 	err = kstrtoul(s, 0, &val);
676 	if (err)
677 		return err;
678 	if (val > 0xffff)
679 		return -EINVAL;
680 	adap->params.tp.la_mask = val << 16;
681 	t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
682 			 adap->params.tp.la_mask);
683 	return count;
684 }
685 
686 static const struct file_operations tp_la_fops = {
687 	.owner   = THIS_MODULE,
688 	.open    = tp_la_open,
689 	.read    = seq_read,
690 	.llseek  = seq_lseek,
691 	.release = seq_release_private,
692 	.write   = tp_la_write
693 };
694 
695 static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
696 {
697 	const u32 *p = v;
698 
699 	if (v == SEQ_START_TOKEN)
700 		seq_puts(seq, "      Pcmd        Type   Message"
701 			 "                Data\n");
702 	else
703 		seq_printf(seq, "%08x%08x  %4x  %08x  %08x%08x%08x%08x\n",
704 			   p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
705 	return 0;
706 }
707 
708 static int ulprx_la_open(struct inode *inode, struct file *file)
709 {
710 	struct seq_tab *p;
711 	struct adapter *adap = inode->i_private;
712 
713 	p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
714 			 ulprx_la_show);
715 	if (!p)
716 		return -ENOMEM;
717 
718 	t4_ulprx_read_la(adap, (u32 *)p->data);
719 	return 0;
720 }
721 
722 static const struct file_operations ulprx_la_fops = {
723 	.owner   = THIS_MODULE,
724 	.open    = ulprx_la_open,
725 	.read    = seq_read,
726 	.llseek  = seq_lseek,
727 	.release = seq_release_private
728 };
729 
730 /* Show the PM memory stats.  These stats include:
731  *
732  * TX:
733  *   Read: memory read operation
734  *   Write Bypass: cut-through
735  *   Bypass + mem: cut-through and save copy
736  *
737  * RX:
738  *   Read: memory read
739  *   Write Bypass: cut-through
740  *   Flush: payload trim or drop
741  */
742 static int pm_stats_show(struct seq_file *seq, void *v)
743 {
744 	static const char * const tx_pm_stats[] = {
745 		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
746 	};
747 	static const char * const rx_pm_stats[] = {
748 		"Read:", "Write bypass:", "Write mem:", "Flush:"
749 	};
750 
751 	int i;
752 	u32 tx_cnt[T6_PM_NSTATS], rx_cnt[T6_PM_NSTATS];
753 	u64 tx_cyc[T6_PM_NSTATS], rx_cyc[T6_PM_NSTATS];
754 	struct adapter *adap = seq->private;
755 
756 	t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
757 	t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
758 
759 	seq_printf(seq, "%13s %10s  %20s\n", " ", "Tx pcmds", "Tx bytes");
760 	for (i = 0; i < PM_NSTATS - 1; i++)
761 		seq_printf(seq, "%-13s %10u  %20llu\n",
762 			   tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
763 
764 	seq_printf(seq, "%13s %10s  %20s\n", " ", "Rx pcmds", "Rx bytes");
765 	for (i = 0; i < PM_NSTATS - 1; i++)
766 		seq_printf(seq, "%-13s %10u  %20llu\n",
767 			   rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
768 
769 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
770 		/* In T5 the granularity of the total wait is too fine.
771 		 * It is not useful as it reaches the max value too fast.
772 		 * Hence display this Input FIFO wait for T6 onwards.
773 		 */
774 		seq_printf(seq, "%13s %10s  %20s\n",
775 			   " ", "Total wait", "Total Occupancy");
776 		seq_printf(seq, "Tx FIFO wait  %10u  %20llu\n",
777 			   tx_cnt[i], tx_cyc[i]);
778 		seq_printf(seq, "Rx FIFO wait  %10u  %20llu\n",
779 			   rx_cnt[i], rx_cyc[i]);
780 
781 		/* Skip index 6 as there is nothing useful here */
782 		i += 2;
783 
784 		/* At index 7, a new stat for read latency (count, total wait)
785 		 * is added.
786 		 */
787 		seq_printf(seq, "%13s %10s  %20s\n",
788 			   " ", "Reads", "Total wait");
789 		seq_printf(seq, "Tx latency    %10u  %20llu\n",
790 			   tx_cnt[i], tx_cyc[i]);
791 		seq_printf(seq, "Rx latency    %10u  %20llu\n",
792 			   rx_cnt[i], rx_cyc[i]);
793 	}
794 	return 0;
795 }
796 
797 static int pm_stats_open(struct inode *inode, struct file *file)
798 {
799 	return single_open(file, pm_stats_show, inode->i_private);
800 }
801 
802 static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
803 			      size_t count, loff_t *pos)
804 {
805 	struct adapter *adap = file_inode(file)->i_private;
806 
807 	t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
808 	t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
809 	return count;
810 }
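
/* Note: any write to the pm_stats node resets the PM statistics by writing 0
 * to the Rx and Tx PM stat configuration registers; the written data itself
 * is ignored and the whole count is reported as consumed.
 */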
811 
812 static const struct file_operations pm_stats_debugfs_fops = {
813 	.owner   = THIS_MODULE,
814 	.open    = pm_stats_open,
815 	.read    = seq_read,
816 	.llseek  = seq_lseek,
817 	.release = single_release,
818 	.write   = pm_stats_clear
819 };
820 
821 static int tx_rate_show(struct seq_file *seq, void *v)
822 {
823 	u64 nrate[NCHAN], orate[NCHAN];
824 	struct adapter *adap = seq->private;
825 
826 	t4_get_chan_txrate(adap, nrate, orate);
827 	if (adap->params.arch.nchan == NCHAN) {
828 		seq_puts(seq, "              channel 0   channel 1   "
829 			 "channel 2   channel 3\n");
830 		seq_printf(seq, "NIC B/s:     %10llu  %10llu  %10llu  %10llu\n",
831 			   (unsigned long long)nrate[0],
832 			   (unsigned long long)nrate[1],
833 			   (unsigned long long)nrate[2],
834 			   (unsigned long long)nrate[3]);
835 		seq_printf(seq, "Offload B/s: %10llu  %10llu  %10llu  %10llu\n",
836 			   (unsigned long long)orate[0],
837 			   (unsigned long long)orate[1],
838 			   (unsigned long long)orate[2],
839 			   (unsigned long long)orate[3]);
840 	} else {
841 		seq_puts(seq, "              channel 0   channel 1\n");
842 		seq_printf(seq, "NIC B/s:     %10llu  %10llu\n",
843 			   (unsigned long long)nrate[0],
844 			   (unsigned long long)nrate[1]);
845 		seq_printf(seq, "Offload B/s: %10llu  %10llu\n",
846 			   (unsigned long long)orate[0],
847 			   (unsigned long long)orate[1]);
848 	}
849 	return 0;
850 }
851 DEFINE_SHOW_ATTRIBUTE(tx_rate);
852 
853 static int cctrl_tbl_show(struct seq_file *seq, void *v)
854 {
855 	static const char * const dec_fac[] = {
856 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
857 		"0.9375" };
858 
859 	int i;
860 	u16 (*incr)[NCCTRL_WIN];
861 	struct adapter *adap = seq->private;
862 
863 	incr = kmalloc_array(NMTUS, sizeof(*incr), GFP_KERNEL);
864 	if (!incr)
865 		return -ENOMEM;
866 
867 	t4_read_cong_tbl(adap, incr);
868 
869 	for (i = 0; i < NCCTRL_WIN; ++i) {
870 		seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
871 			   incr[0][i], incr[1][i], incr[2][i], incr[3][i],
872 			   incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
873 		seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
874 			   incr[8][i], incr[9][i], incr[10][i], incr[11][i],
875 			   incr[12][i], incr[13][i], incr[14][i], incr[15][i],
876 			   adap->params.a_wnd[i],
877 			   dec_fac[adap->params.b_wnd[i]]);
878 	}
879 
880 	kfree(incr);
881 	return 0;
882 }
883 DEFINE_SHOW_ATTRIBUTE(cctrl_tbl);
884 
885 /* Format a value in a unit that differs from the value's native unit by the
886  * given factor.
887  */
888 static char *unit_conv(char *buf, size_t len, unsigned int val,
889 		       unsigned int factor)
890 {
891 	unsigned int rem = val % factor;
892 
893 	if (rem == 0) {
894 		snprintf(buf, len, "%u", val / factor);
895 	} else {
896 		while (rem % 10 == 0)
897 			rem /= 10;
898 		snprintf(buf, len, "%u.%u", val / factor, rem);
899 	}
900 	return buf;
901 }
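
/* For example: unit_conv(buf, len, 625, 1000) yields "0.625" (the core clock
 * period printed below for a hypothetical 1.6 GHz core clock, i.e. a 625 ps
 * period), and unit_conv(buf, len, 4000, 1000) yields "4" since the remainder
 * is zero.  Trailing zeroes of the remainder are stripped before the
 * fractional part is printed.
 */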
902 
903 static int clk_show(struct seq_file *seq, void *v)
904 {
905 	char buf[32];
906 	struct adapter *adap = seq->private;
907 	unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk;  /* in ps */
908 	u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
909 	unsigned int tre = TIMERRESOLUTION_G(res);
910 	unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
911 	unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
912 
913 	seq_printf(seq, "Core clock period: %s ns\n",
914 		   unit_conv(buf, sizeof(buf), cclk_ps, 1000));
915 	seq_printf(seq, "TP timer tick: %s us\n",
916 		   unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
917 	seq_printf(seq, "TCP timestamp tick: %s us\n",
918 		   unit_conv(buf, sizeof(buf),
919 			     (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
920 	seq_printf(seq, "DACK tick: %s us\n",
921 		   unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
922 	seq_printf(seq, "DACK timer: %u us\n",
923 		   ((cclk_ps << dack_re) / 1000000) *
924 		   t4_read_reg(adap, TP_DACK_TIMER_A));
925 	seq_printf(seq, "Retransmit min: %llu us\n",
926 		   tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
927 	seq_printf(seq, "Retransmit max: %llu us\n",
928 		   tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
929 	seq_printf(seq, "Persist timer min: %llu us\n",
930 		   tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
931 	seq_printf(seq, "Persist timer max: %llu us\n",
932 		   tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
933 	seq_printf(seq, "Keepalive idle timer: %llu us\n",
934 		   tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
935 	seq_printf(seq, "Keepalive interval: %llu us\n",
936 		   tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
937 	seq_printf(seq, "Initial SRTT: %llu us\n",
938 		   tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
939 	seq_printf(seq, "FINWAIT2 timer: %llu us\n",
940 		   tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
941 
942 	return 0;
943 }
944 DEFINE_SHOW_ATTRIBUTE(clk);
945 
946 /* Firmware Device Log dump. */
947 static const char * const devlog_level_strings[] = {
948 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
949 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
950 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
951 	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
952 	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
953 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
954 };
955 
956 static const char * const devlog_facility_strings[] = {
957 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
958 	[FW_DEVLOG_FACILITY_CF]         = "CF",
959 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
960 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
961 	[FW_DEVLOG_FACILITY_RES]	= "RES",
962 	[FW_DEVLOG_FACILITY_HW]		= "HW",
963 	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
964 	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
965 	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
966 	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
967 	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
968 	[FW_DEVLOG_FACILITY_VI]		= "VI",
969 	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
970 	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
971 	[FW_DEVLOG_FACILITY_TM]		= "TM",
972 	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
973 	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
974 	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
975 	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
976 	[FW_DEVLOG_FACILITY_RI]		= "RI",
977 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
978 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
979 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
980 	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
981 };
982 
983 /* Information gathered by Device Log Open routine for the display routine.
984  */
985 struct devlog_info {
986 	unsigned int nentries;		/* number of entries in log[] */
987 	unsigned int first;		/* first [temporal] entry in log[] */
988 	struct fw_devlog_e log[];	/* Firmware Device Log */
989 };
990 
991 /* Dump a Firmware Device Log entry.
992  */
993 static int devlog_show(struct seq_file *seq, void *v)
994 {
995 	if (v == SEQ_START_TOKEN)
996 		seq_printf(seq, "%10s  %15s  %8s  %8s  %s\n",
997 			   "Seq#", "Tstamp", "Level", "Facility", "Message");
998 	else {
999 		struct devlog_info *dinfo = seq->private;
1000 		int fidx = (uintptr_t)v - 2;
1001 		unsigned long index;
1002 		struct fw_devlog_e *e;
1003 
1004 		/* Get a pointer to the log entry to display.  Skip unused log
1005 		 * entries.
1006 		 */
1007 		index = dinfo->first + fidx;
1008 		if (index >= dinfo->nentries)
1009 			index -= dinfo->nentries;
1010 		e = &dinfo->log[index];
1011 		if (e->timestamp == 0)
1012 			return 0;
1013 
1014 		/* Print the message.  This depends on the firmware using
1015 		 * exactly the same formatting strings as the kernel so we may
1016 		 * eventually have to put a format interpreter in here ...
1017 		 */
1018 		seq_printf(seq, "%10d  %15llu  %8s  %8s  ",
1019 			   be32_to_cpu(e->seqno),
1020 			   be64_to_cpu(e->timestamp),
1021 			   (e->level < ARRAY_SIZE(devlog_level_strings)
1022 			    ? devlog_level_strings[e->level]
1023 			    : "UNKNOWN"),
1024 			   (e->facility < ARRAY_SIZE(devlog_facility_strings)
1025 			    ? devlog_facility_strings[e->facility]
1026 			    : "UNKNOWN"));
1027 		seq_printf(seq, e->fmt,
1028 			   be32_to_cpu(e->params[0]),
1029 			   be32_to_cpu(e->params[1]),
1030 			   be32_to_cpu(e->params[2]),
1031 			   be32_to_cpu(e->params[3]),
1032 			   be32_to_cpu(e->params[4]),
1033 			   be32_to_cpu(e->params[5]),
1034 			   be32_to_cpu(e->params[6]),
1035 			   be32_to_cpu(e->params[7]));
1036 	}
1037 	return 0;
1038 }
1039 
1040 /* Sequential File Operations for Device Log.
1041  */
1042 static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
1043 {
1044 	if (pos > dinfo->nentries)
1045 		return NULL;
1046 
1047 	return (void *)(uintptr_t)(pos + 1);
1048 }
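
/* Note: devlog_get_idx() encodes the iterator position as the opaque
 * non-NULL pointer (pos + 1), leaving NULL to mean end-of-log and (void *)1
 * reserved for SEQ_START_TOKEN; devlog_show() recovers the log index with
 * "(uintptr_t)v - 2".
 */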
1049 
1050 static void *devlog_start(struct seq_file *seq, loff_t *pos)
1051 {
1052 	struct devlog_info *dinfo = seq->private;
1053 
1054 	return (*pos
1055 		? devlog_get_idx(dinfo, *pos)
1056 		: SEQ_START_TOKEN);
1057 }
1058 
1059 static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
1060 {
1061 	struct devlog_info *dinfo = seq->private;
1062 
1063 	(*pos)++;
1064 	return devlog_get_idx(dinfo, *pos);
1065 }
1066 
1067 static void devlog_stop(struct seq_file *seq, void *v)
1068 {
1069 }
1070 
1071 static const struct seq_operations devlog_seq_ops = {
1072 	.start = devlog_start,
1073 	.next  = devlog_next,
1074 	.stop  = devlog_stop,
1075 	.show  = devlog_show
1076 };
1077 
1078 /* Set up for reading the firmware's device log.  We read the entire log here
1079  * and then display it incrementally in devlog_show().
1080  */
1081 static int devlog_open(struct inode *inode, struct file *file)
1082 {
1083 	struct adapter *adap = inode->i_private;
1084 	struct devlog_params *dparams = &adap->params.devlog;
1085 	struct devlog_info *dinfo;
1086 	unsigned int index;
1087 	u32 fseqno;
1088 	int ret;
1089 
1090 	/* If we don't know where the log is we can't do anything.
1091 	 */
1092 	if (dparams->start == 0)
1093 		return -ENXIO;
1094 
1095 	/* Allocate the space to read in the firmware's device log and set up
1096 	 * for the iterated call to our display function.
1097 	 */
1098 	dinfo = __seq_open_private(file, &devlog_seq_ops,
1099 				   sizeof(*dinfo) + dparams->size);
1100 	if (!dinfo)
1101 		return -ENOMEM;
1102 
1103 	/* Record the basic log buffer information and read in the raw log.
1104 	 */
1105 	dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
1106 	dinfo->first = 0;
1107 	spin_lock(&adap->win0_lock);
1108 	ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
1109 			   dparams->start, dparams->size, (__be32 *)dinfo->log,
1110 			   T4_MEMORY_READ);
1111 	spin_unlock(&adap->win0_lock);
1112 	if (ret) {
1113 		seq_release_private(inode, file);
1114 		return ret;
1115 	}
1116 
1117 	/* Find the earliest (lowest Sequence Number) log entry in the
1118 	 * circular Device Log.
1119 	 */
1120 	for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
1121 		struct fw_devlog_e *e = &dinfo->log[index];
1122 		__u32 seqno;
1123 
1124 		if (e->timestamp == 0)
1125 			continue;
1126 
1127 		seqno = be32_to_cpu(e->seqno);
1128 		if (seqno < fseqno) {
1129 			fseqno = seqno;
1130 			dinfo->first = index;
1131 		}
1132 	}
1133 	return 0;
1134 }
1135 
1136 static const struct file_operations devlog_fops = {
1137 	.owner   = THIS_MODULE,
1138 	.open    = devlog_open,
1139 	.read    = seq_read,
1140 	.llseek  = seq_lseek,
1141 	.release = seq_release_private
1142 };
1143 
1144 /* Show Firmware Mailbox Command/Reply Log
1145  *
1146  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1147  * it's possible that we can catch things during a log update and therefore
1148  * see partially corrupted log entries.  But it's probably Good Enough(tm).
1149  * If we ever decide that we want to make sure that we're dumping a coherent
1150  * log, we'd need to perform locking in the mailbox logging and in
1151  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1152  * like we do for the Firmware Device Log.
1153  */
1154 static int mboxlog_show(struct seq_file *seq, void *v)
1155 {
1156 	struct adapter *adapter = seq->private;
1157 	struct mbox_cmd_log *log = adapter->mbox_log;
1158 	struct mbox_cmd *entry;
1159 	int entry_idx, i;
1160 
1161 	if (v == SEQ_START_TOKEN) {
1162 		seq_printf(seq,
1163 			   "%10s  %15s  %5s  %5s  %s\n",
1164 			   "Seq#", "Tstamp", "Atime", "Etime",
1165 			   "Command/Reply");
1166 		return 0;
1167 	}
1168 
1169 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1170 	if (entry_idx >= log->size)
1171 		entry_idx -= log->size;
1172 	entry = mbox_cmd_log_entry(log, entry_idx);
1173 
1174 	/* skip over unused entries */
1175 	if (entry->timestamp == 0)
1176 		return 0;
1177 
1178 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1179 		   entry->seqno, entry->timestamp,
1180 		   entry->access, entry->execute);
1181 	for (i = 0; i < MBOX_LEN / 8; i++) {
1182 		u64 flit = entry->cmd[i];
1183 		u32 hi = (u32)(flit >> 32);
1184 		u32 lo = (u32)flit;
1185 
1186 		seq_printf(seq, "  %08x %08x", hi, lo);
1187 	}
1188 	seq_puts(seq, "\n");
1189 	return 0;
1190 }
1191 
1192 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1193 {
1194 	struct adapter *adapter = seq->private;
1195 	struct mbox_cmd_log *log = adapter->mbox_log;
1196 
1197 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1198 }
1199 
1200 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1201 {
1202 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1203 }
1204 
1205 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1206 {
1207 	++*pos;
1208 	return mboxlog_get_idx(seq, *pos);
1209 }
1210 
1211 static void mboxlog_stop(struct seq_file *seq, void *v)
1212 {
1213 }
1214 
1215 static const struct seq_operations mboxlog_seq_ops = {
1216 	.start = mboxlog_start,
1217 	.next  = mboxlog_next,
1218 	.stop  = mboxlog_stop,
1219 	.show  = mboxlog_show
1220 };
1221 
1222 static int mboxlog_open(struct inode *inode, struct file *file)
1223 {
1224 	int res = seq_open(file, &mboxlog_seq_ops);
1225 
1226 	if (!res) {
1227 		struct seq_file *seq = file->private_data;
1228 
1229 		seq->private = inode->i_private;
1230 	}
1231 	return res;
1232 }
1233 
1234 static const struct file_operations mboxlog_fops = {
1235 	.owner   = THIS_MODULE,
1236 	.open    = mboxlog_open,
1237 	.read    = seq_read,
1238 	.llseek  = seq_lseek,
1239 	.release = seq_release,
1240 };
1241 
1242 static int mbox_show(struct seq_file *seq, void *v)
1243 {
1244 	static const char * const owner[] = { "none", "FW", "driver",
1245 					      "unknown", "<unread>" };
1246 
1247 	int i;
1248 	unsigned int mbox = (uintptr_t)seq->private & 7;
1249 	struct adapter *adap = seq->private - mbox;
1250 	void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
1251 
1252 	/* For T4 we don't have a shadow copy of the Mailbox Control register.
1253 	 * And since reading that real register causes a side effect of
1254 	 * granting ownership, we're best off simply not reading it at all.
1255 	 */
1256 	if (is_t4(adap->params.chip)) {
1257 		i = 4; /* index of "<unread>" */
1258 	} else {
1259 		unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
1260 		void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
1261 
1262 		i = MBOWNER_G(readl(ctrl));
1263 	}
1264 
1265 	seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
1266 
1267 	for (i = 0; i < MBOX_LEN; i += 8)
1268 		seq_printf(seq, "%016llx\n",
1269 			   (unsigned long long)readq(addr + i));
1270 	return 0;
1271 }
1272 
1273 static int mbox_open(struct inode *inode, struct file *file)
1274 {
1275 	return single_open(file, mbox_show, inode->i_private);
1276 }
1277 
1278 static ssize_t mbox_write(struct file *file, const char __user *buf,
1279 			  size_t count, loff_t *pos)
1280 {
1281 	int i;
1282 	char c = '\n', s[256];
1283 	unsigned long long data[8];
1284 	const struct inode *ino;
1285 	unsigned int mbox;
1286 	struct adapter *adap;
1287 	void __iomem *addr;
1288 	void __iomem *ctrl;
1289 
1290 	if (count > sizeof(s) - 1 || !count)
1291 		return -EINVAL;
1292 	if (copy_from_user(s, buf, count))
1293 		return -EFAULT;
1294 	s[count] = '\0';
1295 
1296 	if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
1297 		   &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
1298 		   &data[7], &c) < 8 || c != '\n')
1299 		return -EINVAL;
1300 
1301 	ino = file_inode(file);
1302 	mbox = (uintptr_t)ino->i_private & 7;
1303 	adap = ino->i_private - mbox;
1304 	addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
1305 	ctrl = addr + MBOX_LEN;
1306 
1307 	if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
1308 		return -EBUSY;
1309 
1310 	for (i = 0; i < 8; i++)
1311 		writeq(data[i], addr + 8 * i);
1312 
1313 	writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
1314 	return count;
1315 }
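
/* Note: a write to this node must be a single line of exactly eight 64-bit
 * hex words (one full 64-byte mailbox); anything after the eighth word other
 * than a newline is rejected, and the command is only issued while the host
 * (PL) owns the mailbox.
 */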
1316 
1317 static const struct file_operations mbox_debugfs_fops = {
1318 	.owner   = THIS_MODULE,
1319 	.open    = mbox_open,
1320 	.read    = seq_read,
1321 	.llseek  = seq_lseek,
1322 	.release = single_release,
1323 	.write   = mbox_write
1324 };
1325 
1326 static int mps_trc_show(struct seq_file *seq, void *v)
1327 {
1328 	int enabled, i;
1329 	struct trace_params tp;
1330 	unsigned int trcidx = (uintptr_t)seq->private & 3;
1331 	struct adapter *adap = seq->private - trcidx;
1332 
1333 	t4_get_trace_filter(adap, &tp, trcidx, &enabled);
1334 	if (!enabled) {
1335 		seq_puts(seq, "tracer is disabled\n");
1336 		return 0;
1337 	}
1338 
1339 	if (tp.skip_ofst * 8 >= TRACE_LEN) {
1340 		dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
1341 		return -EINVAL;
1342 	}
1343 	if (tp.port < 8) {
1344 		i = adap->chan_map[tp.port & 3];
1345 		if (i >= MAX_NPORTS) {
1346 			dev_err(adap->pdev_dev, "tracer %u is assigned "
1347 				"to non-existing port\n", trcidx);
1348 			return -EINVAL;
1349 		}
1350 		seq_printf(seq, "tracer is capturing %s %s, ",
1351 			   adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
1352 	} else
1353 		seq_printf(seq, "tracer is capturing loopback %d, ",
1354 			   tp.port - 8);
1355 	seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
1356 		   tp.min_len);
1357 	seq_printf(seq, "packets captured %smatch filter\n",
1358 		   tp.invert ? "do not " : "");
1359 
1360 	if (tp.skip_ofst) {
1361 		seq_puts(seq, "filter pattern: ");
1362 		for (i = 0; i < tp.skip_ofst * 2; i += 2)
1363 			seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
1364 		seq_putc(seq, '/');
1365 		for (i = 0; i < tp.skip_ofst * 2; i += 2)
1366 			seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
1367 		seq_puts(seq, "@0\n");
1368 	}
1369 
1370 	seq_puts(seq, "filter pattern: ");
1371 	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
1372 		seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
1373 	seq_putc(seq, '/');
1374 	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
1375 		seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
1376 	seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
1377 	return 0;
1378 }
1379 
1380 static int mps_trc_open(struct inode *inode, struct file *file)
1381 {
1382 	return single_open(file, mps_trc_show, inode->i_private);
1383 }
1384 
1385 static unsigned int xdigit2int(unsigned char c)
1386 {
1387 	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
1388 }
1389 
1390 #define TRC_PORT_NONE 0xff
1391 #define TRC_RSS_ENABLE 0x33
1392 #define TRC_RSS_DISABLE 0x13
1393 
1394 /* Set an MPS trace filter.  Syntax is:
1395  *
1396  * disable
1397  *
1398  * to disable tracing, or
1399  *
1400  * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
1401  *
1402  * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one
1403  * of the NIC's response qid obtained from sge_qinfo and pattern has the form
1404  *
1405  * <pattern data>[/<pattern mask>][@<anchor>]
1406  *
1407  * Up to 2 filter patterns can be specified.  If 2 are supplied the first one
1408  * must be anchored at 0.  An omitted mask is taken as a mask of 1s, an omitted
1409  * anchor is taken as 0.
1410  */
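
/* Example (hypothetical values; the debugfs path and tracer index depend on
 * how the node was registered, "trace0" is assumed here):
 *
 *	# capture Rx on port 0, snap 128 bytes, steer to response queue 10,
 *	# and match frames whose leading four bytes equal 0x45000000 under
 *	# the mask 0xff000000, anchored at offset 0 (the default):
 *	echo "rx0 qid=10 snaplen=128 45000000/ff000000" > \
 *		/sys/kernel/debug/cxgb4/<pci-dev>/trace0
 *
 *	# turn the tracer off again:
 *	echo disable > /sys/kernel/debug/cxgb4/<pci-dev>/trace0
 */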
1411 static ssize_t mps_trc_write(struct file *file, const char __user *buf,
1412 			     size_t count, loff_t *pos)
1413 {
1414 	int i, enable, ret;
1415 	u32 *data, *mask;
1416 	struct trace_params tp;
1417 	const struct inode *ino;
1418 	unsigned int trcidx;
1419 	char *s, *p, *word, *end;
1420 	struct adapter *adap;
1421 	u32 j;
1422 
1423 	ino = file_inode(file);
1424 	trcidx = (uintptr_t)ino->i_private & 3;
1425 	adap = ino->i_private - trcidx;
1426 
1427 	/* Don't accept more than 1K of input; nothing that long can be valid
1428 	 * except lots of whitespace.  Well, use less.
1429 	 */
1430 	if (count > 1024)
1431 		return -EFBIG;
1432 	p = s = kzalloc(count + 1, GFP_USER);
1433 	if (!s)
1434 		return -ENOMEM;
1435 	if (copy_from_user(s, buf, count)) {
1436 		count = -EFAULT;
1437 		goto out;
1438 	}
1439 
1440 	if (s[count - 1] == '\n')
1441 		s[count - 1] = '\0';
1442 
1443 	enable = strcmp("disable", s) != 0;
1444 	if (!enable)
1445 		goto apply;
1446 
1447 	/* enable or disable trace multi rss filter */
1448 	if (adap->trace_rss)
1449 		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
1450 	else
1451 		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
1452 
1453 	memset(&tp, 0, sizeof(tp));
1454 	tp.port = TRC_PORT_NONE;
1455 	i = 0;	/* counts pattern nibbles */
1456 
1457 	while (p) {
1458 		while (isspace(*p))
1459 			p++;
1460 		word = strsep(&p, " ");
1461 		if (!*word)
1462 			break;
1463 
1464 		if (!strncmp(word, "qid=", 4)) {
1465 			end = (char *)word + 4;
1466 			ret = kstrtouint(end, 10, &j);
1467 			if (ret)
1468 				goto out;
1469 			if (!adap->trace_rss) {
1470 				t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
1471 				continue;
1472 			}
1473 
1474 			switch (trcidx) {
1475 			case 0:
1476 				t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
1477 				break;
1478 			case 1:
1479 				t4_write_reg(adap,
1480 					     MPS_TRC_FILTER1_RSS_CONTROL_A, j);
1481 				break;
1482 			case 2:
1483 				t4_write_reg(adap,
1484 					     MPS_TRC_FILTER2_RSS_CONTROL_A, j);
1485 				break;
1486 			case 3:
1487 				t4_write_reg(adap,
1488 					     MPS_TRC_FILTER3_RSS_CONTROL_A, j);
1489 				break;
1490 			}
1491 			continue;
1492 		}
1493 		if (!strncmp(word, "snaplen=", 8)) {
1494 			end = (char *)word + 8;
1495 			ret = kstrtouint(end, 10, &j);
1496 			if (ret || j > 9600) {
1497 inval:				count = -EINVAL;
1498 				goto out;
1499 			}
1500 			tp.snap_len = j;
1501 			continue;
1502 		}
1503 		if (!strncmp(word, "minlen=", 7)) {
1504 			end = (char *)word + 7;
1505 			ret = kstrtouint(end, 10, &j);
1506 			if (ret || j > TFMINPKTSIZE_M)
1507 				goto inval;
1508 			tp.min_len = j;
1509 			continue;
1510 		}
1511 		if (!strcmp(word, "not")) {
1512 			tp.invert = !tp.invert;
1513 			continue;
1514 		}
1515 		if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
1516 			if (word[8] < '0' || word[8] > '3' || word[9])
1517 				goto inval;
1518 			tp.port = word[8] - '0' + 8;
1519 			continue;
1520 		}
1521 		if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
1522 			if (word[2] < '0' || word[2] > '3' || word[3])
1523 				goto inval;
1524 			tp.port = word[2] - '0' + 4;
1525 			if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
1526 				goto inval;
1527 			continue;
1528 		}
1529 		if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
1530 			if (word[2] < '0' || word[2] > '3' || word[3])
1531 				goto inval;
1532 			tp.port = word[2] - '0';
1533 			if (adap->chan_map[tp.port] >= MAX_NPORTS)
1534 				goto inval;
1535 			continue;
1536 		}
1537 		if (!isxdigit(*word))
1538 			goto inval;
1539 
1540 		/* we have found a trace pattern */
1541 		if (i) {                            /* split pattern */
1542 			if (tp.skip_len)            /* too many splits */
1543 				goto inval;
1544 			tp.skip_ofst = i / 16;
1545 		}
1546 
1547 		data = &tp.data[i / 8];
1548 		mask = &tp.mask[i / 8];
1549 		j = i;
1550 
1551 		while (isxdigit(*word)) {
1552 			if (i >= TRACE_LEN * 2) {
1553 				count = -EFBIG;
1554 				goto out;
1555 			}
1556 			*data = (*data << 4) + xdigit2int(*word++);
1557 			if (++i % 8 == 0)
1558 				data++;
1559 		}
1560 		if (*word == '/') {
1561 			word++;
1562 			while (isxdigit(*word)) {
1563 				if (j >= i)         /* mask longer than data */
1564 					goto inval;
1565 				*mask = (*mask << 4) + xdigit2int(*word++);
1566 				if (++j % 8 == 0)
1567 					mask++;
1568 			}
1569 			if (i != j)                 /* mask shorter than data */
1570 				goto inval;
1571 		} else {                            /* no mask, use all 1s */
1572 			for ( ; i - j >= 8; j += 8)
1573 				*mask++ = 0xffffffff;
1574 			if (i % 8)
1575 				*mask = (1 << (i % 8) * 4) - 1;
1576 		}
1577 		if (*word == '@') {
1578 			end = (char *)word + 1;
1579 			ret = kstrtouint(end, 10, &j);
1580 			if (ret)
1581 				goto inval;
1582 			if (j & 7)          /* doesn't start at multiple of 8 */
1583 				goto inval;
1584 			j /= 8;
1585 			if (j < tp.skip_ofst)     /* overlaps earlier pattern */
1586 				goto inval;
1587 			if (j - tp.skip_ofst > 31)            /* skip too big */
1588 				goto inval;
1589 			tp.skip_len = j - tp.skip_ofst;
1590 		}
1591 		if (i % 8) {
1592 			*data <<= (8 - i % 8) * 4;
1593 			*mask <<= (8 - i % 8) * 4;
1594 			i = (i + 15) & ~15;         /* 8-byte align */
1595 		}
1596 	}
1597 
1598 	if (tp.port == TRC_PORT_NONE)
1599 		goto inval;
1600 
1601 apply:
1602 	i = t4_set_trace_filter(adap, &tp, trcidx, enable);
1603 	if (i)
1604 		count = i;
1605 out:
1606 	kfree(s);
1607 	return count;
1608 }
1609 
1610 static const struct file_operations mps_trc_debugfs_fops = {
1611 	.owner   = THIS_MODULE,
1612 	.open    = mps_trc_open,
1613 	.read    = seq_read,
1614 	.llseek  = seq_lseek,
1615 	.release = single_release,
1616 	.write   = mps_trc_write
1617 };
1618 
1619 static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
1620 			  loff_t *ppos)
1621 {
1622 	loff_t pos = *ppos;
1623 	loff_t avail = file_inode(file)->i_size;
1624 	struct adapter *adap = file->private_data;
1625 
1626 	if (pos < 0)
1627 		return -EINVAL;
1628 	if (pos >= avail)
1629 		return 0;
1630 	if (count > avail - pos)
1631 		count = avail - pos;
1632 
1633 	while (count) {
1634 		size_t len;
1635 		int ret, ofst;
1636 		u8 data[256];
1637 
1638 		ofst = pos & 3;
1639 		len = min(count + ofst, sizeof(data));
1640 		ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
1641 				    (u32 *)data, 1);
1642 		if (ret)
1643 			return ret;
1644 
1645 		len -= ofst;
1646 		if (copy_to_user(buf, data + ofst, len))
1647 			return -EFAULT;
1648 
1649 		buf += len;
1650 		pos += len;
1651 		count -= len;
1652 	}
1653 	count = pos - *ppos;
1654 	*ppos = pos;
1655 	return count;
1656 }
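
/* Note: t4_read_flash() works in aligned 32-bit words, so flash_read() rounds
 * the file position down to a 4-byte boundary (ofst = pos & 3), reads up to
 * 256 bytes into a bounce buffer, and copies only the bytes the caller
 * actually asked for out to user space.
 */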
1657 
1658 static const struct file_operations flash_debugfs_fops = {
1659 	.owner   = THIS_MODULE,
1660 	.open    = mem_open,
1661 	.read    = flash_read,
1662 	.llseek  = default_llseek,
1663 };
1664 
1665 static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1666 {
1667 	*mask = x | y;
1668 	y = (__force u64)cpu_to_be64(y);
1669 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
1670 }
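
/* Note: the MPS TCAM encodes each match bit as an (x, y) pair: (0,0) is
 * don't care, (1,0) matches 0, (0,1) matches 1, and x & y != 0 marks an
 * unused entry (mps_tcam_show() prints those as "-").  tcamxy2valmask()
 * accordingly reports y as the Ethernet address value and x | y as the care
 * mask, copying the low 48 bits of y in network byte order.
 */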
1671 
1672 static int mps_tcam_show(struct seq_file *seq, void *v)
1673 {
1674 	struct adapter *adap = seq->private;
1675 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1676 	if (v == SEQ_START_TOKEN) {
1677 		if (chip_ver > CHELSIO_T5) {
1678 			seq_puts(seq, "Idx  Ethernet address     Mask     "
1679 				 "  VNI   Mask   IVLAN Vld "
1680 				 "DIP_Hit   Lookup  Port "
1681 				 "Vld Ports PF  VF                           "
1682 				 "Replication                                "
1683 				 "    P0 P1 P2 P3  ML\n");
1684 		} else {
1685 			if (adap->params.arch.mps_rplc_size > 128)
1686 				seq_puts(seq, "Idx  Ethernet address     Mask     "
1687 					 "Vld Ports PF  VF                           "
1688 					 "Replication                                "
1689 					 "    P0 P1 P2 P3  ML\n");
1690 			else
1691 				seq_puts(seq, "Idx  Ethernet address     Mask     "
1692 					 "Vld Ports PF  VF              Replication"
1693 					 "	         P0 P1 P2 P3  ML\n");
1694 		}
1695 	} else {
1696 		u64 mask;
1697 		u8 addr[ETH_ALEN];
1698 		bool replicate, dip_hit = false, vlan_vld = false;
1699 		unsigned int idx = (uintptr_t)v - 2;
1700 		u64 tcamy, tcamx, val;
1701 		u32 cls_lo, cls_hi, ctl, data2, vnix = 0, vniy = 0;
1702 		u32 rplc[8] = {0};
1703 		u8 lookup_type = 0, port_num = 0;
1704 		u16 ivlan = 0;
1705 
1706 		if (chip_ver > CHELSIO_T5) {
1707 			/* CtlCmdType - 0: Read, 1: Write
1708 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
1709 			 * CtlXYBitSel- 0: Y bit, 1: X bit
1710 			 */
1711 
1712 			/* Read tcamy */
1713 			ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
1714 			if (idx < 256)
1715 				ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
1716 			else
1717 				ctl |= CTLTCAMINDEX_V(idx - 256) |
1718 				       CTLTCAMSEL_V(1);
1719 			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1720 			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
1721 			tcamy = DMACH_G(val) << 32;
1722 			tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
1723 			data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
1724 			lookup_type = DATALKPTYPE_G(data2);
1725 			/* 0 - Outer header, 1 - Inner header
1726 			 * [71:48] bit locations are overloaded for
1727 			 * outer vs. inner lookup types.
1728 			 */
1729 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
1730 				/* Inner header VNI */
1731 				vniy = (data2 & DATAVIDH2_F) |
1732 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
1733 				dip_hit = data2 & DATADIPHIT_F;
1734 			} else {
1735 				vlan_vld = data2 & DATAVIDH2_F;
1736 				ivlan = VIDL_G(val);
1737 			}
1738 			port_num = DATAPORTNUM_G(data2);
1739 
1740 			/* Read tcamx. Change the control param */
1741 			vnix = 0;
1742 			ctl |= CTLXYBITSEL_V(1);
1743 			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1744 			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
1745 			tcamx = DMACH_G(val) << 32;
1746 			tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
1747 			data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
1748 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
1749 				/* Inner header VNI mask */
1750 				vnix = (data2 & DATAVIDH2_F) |
1751 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
1752 			}
1753 		} else {
1754 			tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
1755 			tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
1756 		}
1757 
1758 		cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
1759 		cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
1760 
1761 		if (tcamx & tcamy) {
1762 			seq_printf(seq, "%3u         -\n", idx);
1763 			goto out;
1764 		}
1765 
1766 		rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
1767 		if (chip_ver > CHELSIO_T5)
1768 			replicate = (cls_lo & T6_REPLICATE_F);
1769 		else
1770 			replicate = (cls_lo & REPLICATE_F);
1771 
1772 		if (replicate) {
1773 			struct fw_ldst_cmd ldst_cmd;
1774 			int ret;
1775 			struct fw_ldst_mps_rplc mps_rplc;
1776 			u32 ldst_addrspc;
1777 
1778 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1779 			ldst_addrspc =
1780 				FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
1781 			ldst_cmd.op_to_addrspace =
1782 				htonl(FW_CMD_OP_V(FW_LDST_CMD) |
1783 				      FW_CMD_REQUEST_F |
1784 				      FW_CMD_READ_F |
1785 				      ldst_addrspc);
1786 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
1787 			ldst_cmd.u.mps.rplc.fid_idx =
1788 				htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
1789 				      FW_LDST_CMD_IDX_V(idx));
1790 			ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
1791 					 sizeof(ldst_cmd), &ldst_cmd);
1792 			if (ret)
1793 				dev_warn(adap->pdev_dev, "Can't read MPS "
1794 					 "replication map for idx %d: %d\n",
1795 					 idx, -ret);
1796 			else {
1797 				mps_rplc = ldst_cmd.u.mps.rplc;
1798 				rplc[0] = ntohl(mps_rplc.rplc31_0);
1799 				rplc[1] = ntohl(mps_rplc.rplc63_32);
1800 				rplc[2] = ntohl(mps_rplc.rplc95_64);
1801 				rplc[3] = ntohl(mps_rplc.rplc127_96);
1802 				if (adap->params.arch.mps_rplc_size > 128) {
1803 					rplc[4] = ntohl(mps_rplc.rplc159_128);
1804 					rplc[5] = ntohl(mps_rplc.rplc191_160);
1805 					rplc[6] = ntohl(mps_rplc.rplc223_192);
1806 					rplc[7] = ntohl(mps_rplc.rplc255_224);
1807 				}
1808 			}
1809 		}
1810 
1811 		tcamxy2valmask(tcamx, tcamy, addr, &mask);
1812 		if (chip_ver > CHELSIO_T5) {
1813 			/* Inner header lookup */
1814 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
1815 				seq_printf(seq,
1816 					   "%3u %pM %012llx %06x %06x    -    -   %3c      'I'  %4x   %3c   %#x%4u%4d",
1817 					   idx, addr,
1818 					   (unsigned long long)mask,
1819 					   vniy, (vnix | vniy),
1820 					   dip_hit ? 'Y' : 'N',
1821 					   port_num,
1822 					   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
1823 					   PORTMAP_G(cls_hi),
1824 					   T6_PF_G(cls_lo),
1825 					   (cls_lo & T6_VF_VALID_F) ?
1826 					   T6_VF_G(cls_lo) : -1);
1827 			} else {
1828 				seq_printf(seq,
1829 					   "%3u %pM %012llx    -       -   ",
1830 					   idx, addr,
1831 					   (unsigned long long)mask);
1832 
1833 				if (vlan_vld)
1834 					seq_printf(seq, "%4u   Y     ", ivlan);
1835 				else
1836 					seq_puts(seq, "  -    N     ");
1837 
1838 				seq_printf(seq,
1839 					   "-      %3c  %4x   %3c   %#x%4u%4d",
1840 					   lookup_type ? 'I' : 'O', port_num,
1841 					   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
1842 					   PORTMAP_G(cls_hi),
1843 					   T6_PF_G(cls_lo),
1844 					   (cls_lo & T6_VF_VALID_F) ?
1845 					   T6_VF_G(cls_lo) : -1);
1846 			}
1847 		} else
1848 			seq_printf(seq, "%3u %pM %012llx%3c   %#x%4u%4d",
1849 				   idx, addr, (unsigned long long)mask,
1850 				   (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
1851 				   PORTMAP_G(cls_hi),
1852 				   PF_G(cls_lo),
1853 				   (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
1854 
1855 		if (replicate) {
1856 			if (adap->params.arch.mps_rplc_size > 128)
1857 				seq_printf(seq, " %08x %08x %08x %08x "
1858 					   "%08x %08x %08x %08x",
1859 					   rplc[7], rplc[6], rplc[5], rplc[4],
1860 					   rplc[3], rplc[2], rplc[1], rplc[0]);
1861 			else
1862 				seq_printf(seq, " %08x %08x %08x %08x",
1863 					   rplc[3], rplc[2], rplc[1], rplc[0]);
1864 		} else {
1865 			if (adap->params.arch.mps_rplc_size > 128)
1866 				seq_printf(seq, "%72c", ' ');
1867 			else
1868 				seq_printf(seq, "%36c", ' ');
1869 		}
1870 
1871 		if (chip_ver > CHELSIO_T5)
1872 			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
1873 				   T6_SRAM_PRIO0_G(cls_lo),
1874 				   T6_SRAM_PRIO1_G(cls_lo),
1875 				   T6_SRAM_PRIO2_G(cls_lo),
1876 				   T6_SRAM_PRIO3_G(cls_lo),
1877 				   (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
1878 		else
1879 			seq_printf(seq, "%4u%3u%3u%3u %#x\n",
1880 				   SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
1881 				   SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
1882 				   (cls_lo >> MULTILISTEN0_S) & 0xf);
1883 	}
1884 out:	return 0;
1885 }
1886 
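/* Sequence positions are pointer-encoded: position 0 yields SEQ_START_TOKEN
 * for the header line, and position N is returned as the opaque pointer
 * value N + 1, which mps_tcam_show() decodes back to a zero-based TCAM
 * index with (uintptr_t)v - 2.
 */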
1887 static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
1888 {
1889 	struct adapter *adap = seq->private;
1890 	int max_mac_addr = is_t4(adap->params.chip) ?
1891 				NUM_MPS_CLS_SRAM_L_INSTANCES :
1892 				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1893 	return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
1894 }
1895 
1896 static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
1897 {
1898 	return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
1899 }
1900 
1901 static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
1902 {
1903 	++*pos;
1904 	return mps_tcam_get_idx(seq, *pos);
1905 }
1906 
1907 static void mps_tcam_stop(struct seq_file *seq, void *v)
1908 {
1909 }
1910 
1911 static const struct seq_operations mps_tcam_seq_ops = {
1912 	.start = mps_tcam_start,
1913 	.next  = mps_tcam_next,
1914 	.stop  = mps_tcam_stop,
1915 	.show  = mps_tcam_show
1916 };
1917 
1918 static int mps_tcam_open(struct inode *inode, struct file *file)
1919 {
1920 	int res = seq_open(file, &mps_tcam_seq_ops);
1921 
1922 	if (!res) {
1923 		struct seq_file *seq = file->private_data;
1924 
1925 		seq->private = inode->i_private;
1926 	}
1927 	return res;
1928 }
1929 
1930 static const struct file_operations mps_tcam_debugfs_fops = {
1931 	.owner   = THIS_MODULE,
1932 	.open    = mps_tcam_open,
1933 	.read    = seq_read,
1934 	.llseek  = seq_lseek,
1935 	.release = seq_release,
1936 };
1937 
1938 /* Display various sensor information.
1939  */
1940 static int sensors_show(struct seq_file *seq, void *v)
1941 {
1942 	struct adapter *adap = seq->private;
1943 	u32 param[7], val[7];
1944 	int ret;
1945 
1946 	/* Note that if the sensors haven't been initialized and turned on
1947 	 * we'll get values of 0, so treat those as "<unknown>" ...
1948 	 */
1949 	param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1950 		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
1951 		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
1952 	param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1953 		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
1954 		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
1955 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
1956 			      param, val);
1957 
1958 	if (ret < 0 || val[0] == 0)
1959 		seq_puts(seq, "Temperature: <unknown>\n");
1960 	else
1961 		seq_printf(seq, "Temperature: %dC\n", val[0]);
1962 
1963 	if (ret < 0 || val[1] == 0)
1964 		seq_puts(seq, "Core VDD:    <unknown>\n");
1965 	else
1966 		seq_printf(seq, "Core VDD:    %dmV\n", val[1]);
1967 
1968 	return 0;
1969 }
1970 DEFINE_SHOW_ATTRIBUTE(sensors);
1971 
1972 #if IS_ENABLED(CONFIG_IPV6)
1973 DEFINE_SHOW_ATTRIBUTE(clip_tbl);
1974 #endif
1975 
1976 /* RSS Table.
1977  */
1978 
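/* Each line of the RSS indirection-table dump covers eight consecutive
 * entries; rss_open() below reads the whole table with t4_read_rss() into
 * a seq_tab of nentries / 8 rows of eight u16 values.
 */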
1979 static int rss_show(struct seq_file *seq, void *v, int idx)
1980 {
1981 	u16 *entry = v;
1982 
1983 	seq_printf(seq, "%4d:  %4u  %4u  %4u  %4u  %4u  %4u  %4u  %4u\n",
1984 		   idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
1985 		   entry[5], entry[6], entry[7]);
1986 	return 0;
1987 }
1988 
1989 static int rss_open(struct inode *inode, struct file *file)
1990 {
1991 	struct adapter *adap = inode->i_private;
1992 	int ret, nentries;
1993 	struct seq_tab *p;
1994 
1995 	nentries = t4_chip_rss_size(adap);
1996 	p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
1997 	if (!p)
1998 		return -ENOMEM;
1999 
2000 	ret = t4_read_rss(adap, (u16 *)p->data);
2001 	if (ret)
2002 		seq_release_private(inode, file);
2003 
2004 	return ret;
2005 }
2006 
2007 static const struct file_operations rss_debugfs_fops = {
2008 	.owner   = THIS_MODULE,
2009 	.open    = rss_open,
2010 	.read    = seq_read,
2011 	.llseek  = seq_lseek,
2012 	.release = seq_release_private
2013 };
2014 
2015 /* RSS Configuration.
2016  */
2017 
2018 /* Small utility function that returns the string "yes" if the supplied
2019  * argument is non-zero and "no" otherwise.
2020  */
2021 static const char *yesno(int x)
2022 {
2023 	static const char *yes = "yes";
2024 	static const char *no = "no";
2025 
2026 	return x ? yes : no;
2027 }
2028 
2029 static int rss_config_show(struct seq_file *seq, void *v)
2030 {
2031 	struct adapter *adapter = seq->private;
2032 	static const char * const keymode[] = {
2033 		"global",
2034 		"global and per-VF scramble",
2035 		"per-PF and per-VF scramble",
2036 		"per-VF and per-VF scramble",
2037 	};
2038 	u32 rssconf;
2039 
2040 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
2041 	seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
2042 	seq_printf(seq, "  Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
2043 							TNL4TUPENIPV6_F));
2044 	seq_printf(seq, "  Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
2045 							TNL2TUPENIPV6_F));
2046 	seq_printf(seq, "  Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
2047 							TNL4TUPENIPV4_F));
2048 	seq_printf(seq, "  Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
2049 							TNL2TUPENIPV4_F));
2050 	seq_printf(seq, "  TnlTcpSel:     %3s\n", yesno(rssconf & TNLTCPSEL_F));
2051 	seq_printf(seq, "  TnlIp6Sel:     %3s\n", yesno(rssconf & TNLIP6SEL_F));
2052 	seq_printf(seq, "  TnlVrtSel:     %3s\n", yesno(rssconf & TNLVRTSEL_F));
2053 	seq_printf(seq, "  TnlMapEn:      %3s\n", yesno(rssconf & TNLMAPEN_F));
2054 	seq_printf(seq, "  OfdHashSave:   %3s\n", yesno(rssconf &
2055 							OFDHASHSAVE_F));
2056 	seq_printf(seq, "  OfdVrtSel:     %3s\n", yesno(rssconf & OFDVRTSEL_F));
2057 	seq_printf(seq, "  OfdMapEn:      %3s\n", yesno(rssconf & OFDMAPEN_F));
2058 	seq_printf(seq, "  OfdLkpEn:      %3s\n", yesno(rssconf & OFDLKPEN_F));
2059 	seq_printf(seq, "  Syn4TupEnIpv6: %3s\n", yesno(rssconf &
2060 							SYN4TUPENIPV6_F));
2061 	seq_printf(seq, "  Syn2TupEnIpv6: %3s\n", yesno(rssconf &
2062 							SYN2TUPENIPV6_F));
2063 	seq_printf(seq, "  Syn4TupEnIpv4: %3s\n", yesno(rssconf &
2064 							SYN4TUPENIPV4_F));
2065 	seq_printf(seq, "  Syn2TupEnIpv4: %3s\n", yesno(rssconf &
2066 							SYN2TUPENIPV4_F));
2067 	seq_printf(seq, "  Syn4TupEnIpv6: %3s\n", yesno(rssconf &
2068 							SYN4TUPENIPV6_F));
2069 	seq_printf(seq, "  SynIp6Sel:     %3s\n", yesno(rssconf & SYNIP6SEL_F));
2070 	seq_printf(seq, "  SynVrt6Sel:    %3s\n", yesno(rssconf & SYNVRTSEL_F));
2071 	seq_printf(seq, "  SynMapEn:      %3s\n", yesno(rssconf & SYNMAPEN_F));
2072 	seq_printf(seq, "  SynLkpEn:      %3s\n", yesno(rssconf & SYNLKPEN_F));
2073 	seq_printf(seq, "  ChnEn:         %3s\n", yesno(rssconf &
2074 							CHANNELENABLE_F));
2075 	seq_printf(seq, "  PrtEn:         %3s\n", yesno(rssconf &
2076 							PORTENABLE_F));
2077 	seq_printf(seq, "  TnlAllLkp:     %3s\n", yesno(rssconf &
2078 							TNLALLLOOKUP_F));
2079 	seq_printf(seq, "  VrtEn:         %3s\n", yesno(rssconf &
2080 							VIRTENABLE_F));
2081 	seq_printf(seq, "  CngEn:         %3s\n", yesno(rssconf &
2082 							CONGESTIONENABLE_F));
2083 	seq_printf(seq, "  HashToeplitz:  %3s\n", yesno(rssconf &
2084 							HASHTOEPLITZ_F));
2085 	seq_printf(seq, "  Udp4En:        %3s\n", yesno(rssconf & UDPENABLE_F));
2086 	seq_printf(seq, "  Disable:       %3s\n", yesno(rssconf & DISABLE_F));
2087 
2088 	seq_puts(seq, "\n");
2089 
2090 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
2091 	seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
2092 	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
2093 	seq_printf(seq, "  MaskFilter:    %3d\n", MASKFILTER_G(rssconf));
2094 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
2095 		seq_printf(seq, "  HashAll:     %3s\n",
2096 			   yesno(rssconf & HASHALL_F));
2097 		seq_printf(seq, "  HashEth:     %3s\n",
2098 			   yesno(rssconf & HASHETH_F));
2099 	}
2100 	seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
2101 
2102 	seq_puts(seq, "\n");
2103 
2104 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
2105 	seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
2106 	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
2107 	seq_printf(seq, "  RRCplMapEn:    %3s\n", yesno(rssconf &
2108 							RRCPLMAPEN_F));
2109 	seq_printf(seq, "  RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
2110 
2111 	seq_puts(seq, "\n");
2112 
2113 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
2114 	seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
2115 	seq_printf(seq, "  MaskSize:      %3d\n", MASKSIZE_G(rssconf));
2116 	seq_printf(seq, "  UseWireCh:     %3s\n", yesno(rssconf & USEWIRECH_F));
2117 
2118 	seq_puts(seq, "\n");
2119 
2120 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
2121 	seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
2122 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
2123 		seq_printf(seq, "  KeyWrAddrX:     %3d\n",
2124 			   KEYWRADDRX_G(rssconf));
2125 		seq_printf(seq, "  KeyExtend:      %3s\n",
2126 			   yesno(rssconf & KEYEXTEND_F));
2127 	}
2128 	seq_printf(seq, "  VfRdRg:        %3s\n", yesno(rssconf & VFRDRG_F));
2129 	seq_printf(seq, "  VfRdEn:        %3s\n", yesno(rssconf & VFRDEN_F));
2130 	seq_printf(seq, "  VfPerrEn:      %3s\n", yesno(rssconf & VFPERREN_F));
2131 	seq_printf(seq, "  KeyPerrEn:     %3s\n", yesno(rssconf & KEYPERREN_F));
2132 	seq_printf(seq, "  DisVfVlan:     %3s\n", yesno(rssconf &
2133 							DISABLEVLAN_F));
2134 	seq_printf(seq, "  EnUpSwt:       %3s\n", yesno(rssconf & ENABLEUP0_F));
2135 	seq_printf(seq, "  HashDelay:     %3d\n", HASHDELAY_G(rssconf));
2136 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2137 		seq_printf(seq, "  VfWrAddr:      %3d\n", VFWRADDR_G(rssconf));
2138 	else
2139 		seq_printf(seq, "  VfWrAddr:      %3d\n",
2140 			   T6_VFWRADDR_G(rssconf));
2141 	seq_printf(seq, "  KeyMode:       %s\n", keymode[KEYMODE_G(rssconf)]);
2142 	seq_printf(seq, "  VfWrEn:        %3s\n", yesno(rssconf & VFWREN_F));
2143 	seq_printf(seq, "  KeyWrEn:       %3s\n", yesno(rssconf & KEYWREN_F));
2144 	seq_printf(seq, "  KeyWrAddr:     %3d\n", KEYWRADDR_G(rssconf));
2145 
2146 	seq_puts(seq, "\n");
2147 
2148 	rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
2149 	seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
2150 	seq_printf(seq, "  ChnCount3:     %3s\n", yesno(rssconf & CHNCOUNT3_F));
2151 	seq_printf(seq, "  ChnCount2:     %3s\n", yesno(rssconf & CHNCOUNT2_F));
2152 	seq_printf(seq, "  ChnCount1:     %3s\n", yesno(rssconf & CHNCOUNT1_F));
2153 	seq_printf(seq, "  ChnCount0:     %3s\n", yesno(rssconf & CHNCOUNT0_F));
2154 	seq_printf(seq, "  ChnUndFlow3:   %3s\n", yesno(rssconf &
2155 							CHNUNDFLOW3_F));
2156 	seq_printf(seq, "  ChnUndFlow2:   %3s\n", yesno(rssconf &
2157 							CHNUNDFLOW2_F));
2158 	seq_printf(seq, "  ChnUndFlow1:   %3s\n", yesno(rssconf &
2159 							CHNUNDFLOW1_F));
2160 	seq_printf(seq, "  ChnUndFlow0:   %3s\n", yesno(rssconf &
2161 							CHNUNDFLOW0_F));
2162 	seq_printf(seq, "  RstChn3:       %3s\n", yesno(rssconf & RSTCHN3_F));
2163 	seq_printf(seq, "  RstChn2:       %3s\n", yesno(rssconf & RSTCHN2_F));
2164 	seq_printf(seq, "  RstChn1:       %3s\n", yesno(rssconf & RSTCHN1_F));
2165 	seq_printf(seq, "  RstChn0:       %3s\n", yesno(rssconf & RSTCHN0_F));
2166 	seq_printf(seq, "  UpdVld:        %3s\n", yesno(rssconf & UPDVLD_F));
2167 	seq_printf(seq, "  Xoff:          %3s\n", yesno(rssconf & XOFF_F));
2168 	seq_printf(seq, "  UpdChn3:       %3s\n", yesno(rssconf & UPDCHN3_F));
2169 	seq_printf(seq, "  UpdChn2:       %3s\n", yesno(rssconf & UPDCHN2_F));
2170 	seq_printf(seq, "  UpdChn1:       %3s\n", yesno(rssconf & UPDCHN1_F));
2171 	seq_printf(seq, "  UpdChn0:       %3s\n", yesno(rssconf & UPDCHN0_F));
2172 	seq_printf(seq, "  Queue:         %3d\n", QUEUE_G(rssconf));
2173 
2174 	return 0;
2175 }
2176 DEFINE_SHOW_ATTRIBUTE(rss_config);
2177 
2178 /* RSS Secret Key.
2179  */
2180 
2181 static int rss_key_show(struct seq_file *seq, void *v)
2182 {
2183 	u32 key[10];
2184 
2185 	t4_read_rss_key(seq->private, key, true);
2186 	seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
2187 		   key[9], key[8], key[7], key[6], key[5], key[4], key[3],
2188 		   key[2], key[1], key[0]);
2189 	return 0;
2190 }
2191 
2192 static int rss_key_open(struct inode *inode, struct file *file)
2193 {
2194 	return single_open(file, rss_key_show, inode->i_private);
2195 }
2196 
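/* Program a new 320-bit RSS secret key.  The input must supply 80 hex
 * digits (ten 32-bit words, in the same order that rss_key_show() prints
 * them); the parsed key is written to hardware with t4_write_rss_key().
 */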
2197 static ssize_t rss_key_write(struct file *file, const char __user *buf,
2198 			     size_t count, loff_t *pos)
2199 {
2200 	int i, j;
2201 	u32 key[10];
2202 	char s[100], *p;
2203 	struct adapter *adap = file_inode(file)->i_private;
2204 
2205 	if (count > sizeof(s) - 1)
2206 		return -EINVAL;
2207 	if (copy_from_user(s, buf, count))
2208 		return -EFAULT;
2209 	for (i = count; i > 0 && isspace(s[i - 1]); i--)
2210 		;
2211 	s[i] = '\0';
2212 
2213 	for (p = s, i = 9; i >= 0; i--) {
2214 		key[i] = 0;
2215 		for (j = 0; j < 8; j++, p++) {
2216 			if (!isxdigit(*p))
2217 				return -EINVAL;
2218 			key[i] = (key[i] << 4) | hex2val(*p);
2219 		}
2220 	}
2221 
2222 	t4_write_rss_key(adap, key, -1, true);
2223 	return count;
2224 }
2225 
2226 static const struct file_operations rss_key_debugfs_fops = {
2227 	.owner   = THIS_MODULE,
2228 	.open    = rss_key_open,
2229 	.read    = seq_read,
2230 	.llseek  = seq_lseek,
2231 	.release = single_release,
2232 	.write   = rss_key_write
2233 };
2234 
2235 /* PF RSS Configuration.
2236  */
2237 
2238 struct rss_pf_conf {
2239 	u32 rss_pf_map;
2240 	u32 rss_pf_mask;
2241 	u32 rss_pf_config;
2242 };
2243 
2244 static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
2245 {
2246 	struct rss_pf_conf *pfconf;
2247 
2248 	if (v == SEQ_START_TOKEN) {
2249 		/* use the 0th entry to dump the PF Map Index Size */
2250 		pfconf = seq->private + offsetof(struct seq_tab, data);
2251 		seq_printf(seq, "PF Map Index Size = %d\n\n",
2252 			   LKPIDXSIZE_G(pfconf->rss_pf_map));
2253 
2254 		seq_puts(seq, "     RSS              PF   VF    Hash Tuple Enable         Default\n");
2255 		seq_puts(seq, "     Enable       IPF Mask Mask  IPv6      IPv4      UDP   Queue\n");
2256 		seq_puts(seq, " PF  Map Chn Prt  Map Size Size  Four Two  Four Two  Four  Ch1  Ch0\n");
2257 	} else {
2258 		#define G_PFnLKPIDX(map, n) \
2259 			(((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
2260 		#define G_PFnMSKSIZE(mask, n) \
2261 			(((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)
2262 
2263 		pfconf = v;
2264 		seq_printf(seq, "%3d  %3s %3s %3s  %3d  %3d  %3d   %3s %3s   %3s %3s   %3s  %3d  %3d\n",
2265 			   idx,
2266 			   yesno(pfconf->rss_pf_config & MAPENABLE_F),
2267 			   yesno(pfconf->rss_pf_config & CHNENABLE_F),
2268 			   yesno(pfconf->rss_pf_config & PRTENABLE_F),
2269 			   G_PFnLKPIDX(pfconf->rss_pf_map, idx),
2270 			   G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
2271 			   IVFWIDTH_G(pfconf->rss_pf_config),
2272 			   yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
2273 			   yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
2274 			   yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
2275 			   yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
2276 			   yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
2277 			   CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
2278 			   CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));
2279 
2280 		#undef G_PFnLKPIDX
2281 		#undef G_PFnMSKSIZE
2282 	}
2283 	return 0;
2284 }
2285 
2286 static int rss_pf_config_open(struct inode *inode, struct file *file)
2287 {
2288 	struct adapter *adapter = inode->i_private;
2289 	struct seq_tab *p;
2290 	u32 rss_pf_map, rss_pf_mask;
2291 	struct rss_pf_conf *pfconf;
2292 	int pf;
2293 
2294 	p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
2295 	if (!p)
2296 		return -ENOMEM;
2297 
2298 	pfconf = (struct rss_pf_conf *)p->data;
2299 	rss_pf_map = t4_read_rss_pf_map(adapter, true);
2300 	rss_pf_mask = t4_read_rss_pf_mask(adapter, true);
2301 	for (pf = 0; pf < 8; pf++) {
2302 		pfconf[pf].rss_pf_map = rss_pf_map;
2303 		pfconf[pf].rss_pf_mask = rss_pf_mask;
2304 		t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config,
2305 				      true);
2306 	}
2307 	return 0;
2308 }
2309 
2310 static const struct file_operations rss_pf_config_debugfs_fops = {
2311 	.owner   = THIS_MODULE,
2312 	.open    = rss_pf_config_open,
2313 	.read    = seq_read,
2314 	.llseek  = seq_lseek,
2315 	.release = seq_release_private
2316 };
2317 
2318 /* VF RSS Configuration.
2319  */
2320 
2321 struct rss_vf_conf {
2322 	u32 rss_vf_vfl;
2323 	u32 rss_vf_vfh;
2324 };
2325 
2326 static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
2327 {
2328 	if (v == SEQ_START_TOKEN) {
2329 		seq_puts(seq, "     RSS                     Hash Tuple Enable\n");
2330 		seq_puts(seq, "     Enable   IVF  Dis  Enb  IPv6      IPv4      UDP    Def  Secret Key\n");
2331 		seq_puts(seq, " VF  Chn Prt  Map  VLAN  uP  Four Two  Four Two  Four   Que  Idx       Hash\n");
2332 	} else {
2333 		struct rss_vf_conf *vfconf = v;
2334 
2335 		seq_printf(seq, "%3d  %3s %3s  %3d   %3s %3s   %3s %3s   %3s  %3s   %3s  %4d  %3d %#10x\n",
2336 			   idx,
2337 			   yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
2338 			   yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
2339 			   VFLKPIDX_G(vfconf->rss_vf_vfh),
2340 			   yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
2341 			   yesno(vfconf->rss_vf_vfh & VFUPEN_F),
2342 			   yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
2343 			   yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
2344 			   yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
2345 			   yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
2346 			   yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
2347 			   DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
2348 			   KEYINDEX_G(vfconf->rss_vf_vfh),
2349 			   vfconf->rss_vf_vfl);
2350 	}
2351 	return 0;
2352 }
2353 
2354 static int rss_vf_config_open(struct inode *inode, struct file *file)
2355 {
2356 	struct adapter *adapter = inode->i_private;
2357 	struct seq_tab *p;
2358 	struct rss_vf_conf *vfconf;
2359 	int vf, vfcount = adapter->params.arch.vfcount;
2360 
2361 	p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
2362 	if (!p)
2363 		return -ENOMEM;
2364 
2365 	vfconf = (struct rss_vf_conf *)p->data;
2366 	for (vf = 0; vf < vfcount; vf++) {
2367 		t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
2368 				      &vfconf[vf].rss_vf_vfh, true);
2369 	}
2370 	return 0;
2371 }
2372 
2373 static const struct file_operations rss_vf_config_debugfs_fops = {
2374 	.owner   = THIS_MODULE,
2375 	.open    = rss_vf_config_open,
2376 	.read    = seq_read,
2377 	.llseek  = seq_lseek,
2378 	.release = seq_release_private
2379 };
2380 
2381 #ifdef CONFIG_CHELSIO_T4_DCB
2382 
2383 /* Data Center Bridging information for each port.
2384  */
2385 static int dcb_info_show(struct seq_file *seq, void *v)
2386 {
2387 	struct adapter *adap = seq->private;
2388 
2389 	if (v == SEQ_START_TOKEN) {
2390 		seq_puts(seq, "Data Center Bridging Information\n");
2391 	} else {
2392 		int port = (uintptr_t)v - 2;
2393 		struct net_device *dev = adap->port[port];
2394 		struct port_info *pi = netdev2pinfo(dev);
2395 		struct port_dcb_info *dcb = &pi->dcb;
2396 
2397 		seq_puts(seq, "\n");
2398 		seq_printf(seq, "Port: %d (DCB negotiated: %s)\n",
2399 			   port,
2400 			   cxgb4_dcb_enabled(dev) ? "yes" : "no");
2401 
2402 		if (cxgb4_dcb_enabled(dev))
2403 			seq_printf(seq, "[ DCBx Version %s ]\n",
2404 				   dcb_ver_array[dcb->dcb_version]);
2405 
2406 		if (dcb->msgs) {
2407 			int i;
2408 
2409 			seq_puts(seq, "\n  Index\t\t\t  :\t");
2410 			for (i = 0; i < 8; i++)
2411 				seq_printf(seq, " %3d", i);
2412 			seq_puts(seq, "\n\n");
2413 		}
2414 
2415 		if (dcb->msgs & CXGB4_DCB_FW_PGID) {
2416 			int prio, pgid;
2417 
2418 			seq_puts(seq, "  Priority Group IDs\t  :\t");
2419 			for (prio = 0; prio < 8; prio++) {
2420 				pgid = (dcb->pgid >> 4 * (7 - prio)) & 0xf;
2421 				seq_printf(seq, " %3d", pgid);
2422 			}
2423 			seq_puts(seq, "\n");
2424 		}
2425 
2426 		if (dcb->msgs & CXGB4_DCB_FW_PGRATE) {
2427 			int pg;
2428 
2429 			seq_puts(seq, "  Priority Group BW(%)\t  :\t");
2430 			for (pg = 0; pg < 8; pg++)
2431 				seq_printf(seq, " %3d", dcb->pgrate[pg]);
2432 			seq_puts(seq, "\n");
2433 
2434 			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
2435 				seq_puts(seq, "  TSA Algorithm\t\t  :\t");
2436 				for (pg = 0; pg < 8; pg++)
2437 					seq_printf(seq, " %3d", dcb->tsa[pg]);
2438 				seq_puts(seq, "\n");
2439 			}
2440 
2441 			seq_printf(seq, "  Max PG Traffic Classes  [%3d  ]\n",
2442 				   dcb->pg_num_tcs_supported);
2443 
2444 			seq_puts(seq, "\n");
2445 		}
2446 
2447 		if (dcb->msgs & CXGB4_DCB_FW_PRIORATE) {
2448 			int prio;
2449 
2450 			seq_puts(seq, "  Priority Rate\t:\t");
2451 			for (prio = 0; prio < 8; prio++)
2452 				seq_printf(seq, " %3d", dcb->priorate[prio]);
2453 			seq_puts(seq, "\n");
2454 		}
2455 
2456 		if (dcb->msgs & CXGB4_DCB_FW_PFC) {
2457 			int prio;
2458 
2459 			seq_puts(seq, "  Priority Flow Control   :\t");
2460 			for (prio = 0; prio < 8; prio++) {
2461 				int pfcen = (dcb->pfcen >> 1 * (7 - prio))
2462 					    & 0x1;
2463 				seq_printf(seq, " %3d", pfcen);
2464 			}
2465 			seq_puts(seq, "\n");
2466 
2467 			seq_printf(seq, "  Max PFC Traffic Classes [%3d  ]\n",
2468 				   dcb->pfc_num_tcs_supported);
2469 
2470 			seq_puts(seq, "\n");
2471 		}
2472 
2473 		if (dcb->msgs & CXGB4_DCB_FW_APP_ID) {
2474 			int app, napps;
2475 
2476 			seq_puts(seq, "  Application Information:\n");
2477 			seq_puts(seq, "  App    Priority    Selection         Protocol\n");
2478 			seq_puts(seq, "  Index  Map         Field             ID\n");
2479 			for (app = 0, napps = 0;
2480 			     app < CXGB4_MAX_DCBX_APP_SUPPORTED; app++) {
2481 				struct app_priority *ap;
2482 				static const char * const sel_names[] = {
2483 					"Ethertype",
2484 					"Socket TCP",
2485 					"Socket UDP",
2486 					"Socket All",
2487 				};
2488 				const char *sel_name;
2489 
2490 				ap = &dcb->app_priority[app];
2491 				/* skip empty slots */
2492 				if (ap->protocolid == 0)
2493 					continue;
2494 				napps++;
2495 
2496 				if (ap->sel_field < ARRAY_SIZE(sel_names))
2497 					sel_name = sel_names[ap->sel_field];
2498 				else
2499 					sel_name = "UNKNOWN";
2500 
2501 				seq_printf(seq, "  %3d    %#04x        %-10s (%d)    %#06x (%d)\n",
2502 					   app,
2503 					   ap->user_prio_map,
2504 					   sel_name, ap->sel_field,
2505 					   ap->protocolid, ap->protocolid);
2506 			}
2507 			if (napps == 0)
2508 				seq_puts(seq, "    --- None ---\n");
2509 		}
2510 	}
2511 	return 0;
2512 }
2513 
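/* Position 0 is the SEQ_START_TOKEN header; positions 1..nports are
 * pointer-encoded as pos + 1 and decoded back to port indices 0..nports-1
 * by dcb_info_show().
 */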
2514 static inline void *dcb_info_get_idx(struct adapter *adap, loff_t pos)
2515 {
2516 	return (pos <= adap->params.nports
2517 		? (void *)((uintptr_t)pos + 1)
2518 		: NULL);
2519 }
2520 
2521 static void *dcb_info_start(struct seq_file *seq, loff_t *pos)
2522 {
2523 	struct adapter *adap = seq->private;
2524 
2525 	return (*pos
2526 		? dcb_info_get_idx(adap, *pos)
2527 		: SEQ_START_TOKEN);
2528 }
2529 
2530 static void dcb_info_stop(struct seq_file *seq, void *v)
2531 {
2532 }
2533 
2534 static void *dcb_info_next(struct seq_file *seq, void *v, loff_t *pos)
2535 {
2536 	struct adapter *adap = seq->private;
2537 
2538 	(*pos)++;
2539 	return dcb_info_get_idx(adap, *pos);
2540 }
2541 
2542 static const struct seq_operations dcb_info_seq_ops = {
2543 	.start = dcb_info_start,
2544 	.next  = dcb_info_next,
2545 	.stop  = dcb_info_stop,
2546 	.show  = dcb_info_show
2547 };
2548 
2549 static int dcb_info_open(struct inode *inode, struct file *file)
2550 {
2551 	int res = seq_open(file, &dcb_info_seq_ops);
2552 
2553 	if (!res) {
2554 		struct seq_file *seq = file->private_data;
2555 
2556 		seq->private = inode->i_private;
2557 	}
2558 	return res;
2559 }
2560 
2561 static const struct file_operations dcb_info_debugfs_fops = {
2562 	.owner   = THIS_MODULE,
2563 	.open    = dcb_info_open,
2564 	.read    = seq_read,
2565 	.llseek  = seq_lseek,
2566 	.release = seq_release,
2567 };
2568 #endif /* CONFIG_CHELSIO_T4_DCB */
2569 
2570 static int resources_show(struct seq_file *seq, void *v)
2571 {
2572 	struct adapter *adapter = seq->private;
2573 	struct pf_resources *pfres = &adapter->params.pfres;
2574 
2575 	#define S(desc, fmt, var) \
2576 		seq_printf(seq, "%-60s " fmt "\n", \
2577 			   desc " (" #var "):", pfres->var)
2578 
2579 	S("Virtual Interfaces", "%d", nvi);
2580 	S("Egress Queues", "%d", neq);
2581 	S("Ethernet Control", "%d", nethctrl);
2582 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2583 	S("Ingress Queues", "%d", niq);
2584 	S("Traffic Class", "%d", tc);
2585 	S("Port Access Rights Mask", "%#x", pmask);
2586 	S("MAC Address Filters", "%d", nexactf);
2587 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2588 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2589 
2590 	#undef S
2591 
2592 	return 0;
2593 }
2594 DEFINE_SHOW_ATTRIBUTE(resources);
2595 
2596 /**
2597  * ethqset2pinfo - return port_info of an Ethernet Queue Set
2598  * @adap: the adapter
2599  * @qset: Ethernet Queue Set
2600  */
2601 static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
2602 {
2603 	int pidx;
2604 
2605 	for_each_port(adap, pidx) {
2606 		struct port_info *pi = adap2pinfo(adap, pidx);
2607 
2608 		if (qset >= pi->first_qset &&
2609 		    qset < pi->first_qset + pi->nqsets)
2610 			return pi;
2611 	}
2612 
2613 	/* should never happen! */
2614 	BUG();
2615 	return NULL;
2616 }
2617 
2618 static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld)
2619 {
2620 	const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld];
2621 
2622 	if (!utxq_info)
2623 		return 0;
2624 
2625 	return DIV_ROUND_UP(utxq_info->ntxq, 4);
2626 }
2627 
2628 static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld,
2629 				      bool ciq)
2630 {
2631 	const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld];
2632 
2633 	if (!urxq_info)
2634 		return 0;
2635 
2636 	return ciq ? DIV_ROUND_UP(urxq_info->nciq, 4) :
2637 		     DIV_ROUND_UP(urxq_info->nrxq, 4);
2638 }
2639 
2640 static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld)
2641 {
2642 	return sge_qinfo_uld_rspq_entries(adap, uld, false);
2643 }
2644 
2645 static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
2646 {
2647 	return sge_qinfo_uld_rspq_entries(adap, uld, true);
2648 }
2649 
2650 static int sge_qinfo_show(struct seq_file *seq, void *v)
2651 {
2652 	int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
2653 	int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
2654 	int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
2655 	int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
2656 	const struct sge_uld_txq_info *utxq_info;
2657 	const struct sge_uld_rxq_info *urxq_info;
2658 	struct cxgb4_tc_port_mqprio *port_mqprio;
2659 	struct adapter *adap = seq->private;
2660 	int i, j, n, r = (uintptr_t)v - 1;
2661 	struct sge *s = &adap->sge;
2662 
2663 	eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
2664 	ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
2665 
2666 	if (r)
2667 		seq_putc(seq, '\n');
2668 
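/* Row-printing helpers: S3() emits a label followed by one column for each
 * of the n queue sets in the current group (i is the column index); S() is
 * the string form, T()/TL() index the local tx[] array and R()/RL() the
 * rx[] array for unsigned int and unsigned long fields respectively.
 */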
2669 #define S3(fmt_spec, s, v) \
2670 do { \
2671 	seq_printf(seq, "%-12s", s); \
2672 	for (i = 0; i < n; ++i) \
2673 		seq_printf(seq, " %16" fmt_spec, v); \
2674 	seq_putc(seq, '\n'); \
2675 } while (0)
2676 #define S(s, v) S3("s", s, v)
2677 #define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
2678 #define T(s, v) S3("u", s, tx[i].v)
2679 #define TL(s, v) T3("lu", s, v)
2680 #define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
2681 #define R(s, v) S3("u", s, rx[i].v)
2682 #define RL(s, v) R3("lu", s, v)
2683 
2684 	if (r < eth_entries) {
2685 		int base_qset = r * 4;
2686 		const struct sge_eth_rxq *rx = &s->ethrxq[base_qset];
2687 		const struct sge_eth_txq *tx = &s->ethtxq[base_qset];
2688 
2689 		n = min(4, s->ethqsets - 4 * r);
2690 
2691 		S("QType:", "Ethernet");
2692 		S("Interface:",
2693 		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2694 		T("TxQ ID:", q.cntxt_id);
2695 		T("TxQ size:", q.size);
2696 		T("TxQ inuse:", q.in_use);
2697 		T("TxQ CIDX:", q.cidx);
2698 		T("TxQ PIDX:", q.pidx);
2699 #ifdef CONFIG_CHELSIO_T4_DCB
2700 		T("DCB Prio:", dcb_prio);
2701 		S3("u", "DCB PGID:",
2702 		   (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
2703 		    4*(7-tx[i].dcb_prio)) & 0xf);
2704 		S3("u", "DCB PFC:",
2705 		   (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
2706 		    1*(7-tx[i].dcb_prio)) & 0x1);
2707 #endif
2708 		R("RspQ ID:", rspq.abs_id);
2709 		R("RspQ size:", rspq.size);
2710 		R("RspQE size:", rspq.iqe_len);
2711 		R("RspQ CIDX:", rspq.cidx);
2712 		R("RspQ Gen:", rspq.gen);
2713 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2714 		S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
2715 		R("FL ID:", fl.cntxt_id);
2716 		R("FL size:", fl.size - 8);
2717 		R("FL pend:", fl.pend_cred);
2718 		R("FL avail:", fl.avail);
2719 		R("FL PIDX:", fl.pidx);
2720 		R("FL CIDX:", fl.cidx);
2721 		RL("RxPackets:", stats.pkts);
2722 		RL("RxCSO:", stats.rx_cso);
2723 		RL("VLANxtract:", stats.vlan_ex);
2724 		RL("LROmerged:", stats.lro_merged);
2725 		RL("LROpackets:", stats.lro_pkts);
2726 		RL("RxDrops:", stats.rx_drops);
2727 		RL("RxBadPkts:", stats.bad_rx_pkts);
2728 		TL("TSO:", tso);
2729 		TL("USO:", uso);
2730 		TL("TxCSO:", tx_cso);
2731 		TL("VLANins:", vlan_ins);
2732 		TL("TxQFull:", q.stops);
2733 		TL("TxQRestarts:", q.restarts);
2734 		TL("TxMapErr:", mapping_err);
2735 		RL("FLAllocErr:", fl.alloc_failed);
2736 		RL("FLLrgAlcErr:", fl.large_alloc_failed);
2737 		RL("FLMapErr:", fl.mapping_err);
2738 		RL("FLLow:", fl.low);
2739 		RL("FLStarving:", fl.starving);
2740 
2741 		goto out;
2742 	}
2743 
2744 	r -= eth_entries;
2745 	for_each_port(adap, j) {
2746 		struct port_info *pi = adap2pinfo(adap, j);
2747 		const struct sge_eth_rxq *rx;
2748 
2749 		mutex_lock(&pi->vi_mirror_mutex);
2750 		if (!pi->vi_mirror_count) {
2751 			mutex_unlock(&pi->vi_mirror_mutex);
2752 			continue;
2753 		}
2754 
2755 		if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) {
2756 			r -= DIV_ROUND_UP(pi->nmirrorqsets, 4);
2757 			mutex_unlock(&pi->vi_mirror_mutex);
2758 			continue;
2759 		}
2760 
2761 		rx = &s->mirror_rxq[j][r * 4];
2762 		n = min(4, pi->nmirrorqsets - 4 * r);
2763 
2764 		S("QType:", "Mirror-Rxq");
2765 		S("Interface:",
2766 		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2767 		R("RspQ ID:", rspq.abs_id);
2768 		R("RspQ size:", rspq.size);
2769 		R("RspQE size:", rspq.iqe_len);
2770 		R("RspQ CIDX:", rspq.cidx);
2771 		R("RspQ Gen:", rspq.gen);
2772 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2773 		S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
2774 		R("FL ID:", fl.cntxt_id);
2775 		R("FL size:", fl.size - 8);
2776 		R("FL pend:", fl.pend_cred);
2777 		R("FL avail:", fl.avail);
2778 		R("FL PIDX:", fl.pidx);
2779 		R("FL CIDX:", fl.cidx);
2780 		RL("RxPackets:", stats.pkts);
2781 		RL("RxCSO:", stats.rx_cso);
2782 		RL("VLANxtract:", stats.vlan_ex);
2783 		RL("LROmerged:", stats.lro_merged);
2784 		RL("LROpackets:", stats.lro_pkts);
2785 		RL("RxDrops:", stats.rx_drops);
2786 		RL("RxBadPkts:", stats.bad_rx_pkts);
2787 		RL("FLAllocErr:", fl.alloc_failed);
2788 		RL("FLLrgAlcErr:", fl.large_alloc_failed);
2789 		RL("FLMapErr:", fl.mapping_err);
2790 		RL("FLLow:", fl.low);
2791 		RL("FLStarving:", fl.starving);
2792 
2793 		mutex_unlock(&pi->vi_mirror_mutex);
2794 		goto out;
2795 	}
2796 
2797 	if (!adap->tc_mqprio)
2798 		goto skip_mqprio;
2799 
2800 	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
2801 	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
2802 		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
2803 		goto skip_mqprio;
2804 	}
2805 
2806 	eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
2807 	if (r < eohw_entries) {
2808 		int base_qset = r * 4;
2809 		const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
2810 		const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
2811 
2812 		n = min(4, s->eoqsets - 4 * r);
2813 
2814 		S("QType:", "ETHOFLD");
2815 		S("Interface:",
2816 		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2817 		T("TxQ ID:", q.cntxt_id);
2818 		T("TxQ size:", q.size);
2819 		T("TxQ inuse:", q.in_use);
2820 		T("TxQ CIDX:", q.cidx);
2821 		T("TxQ PIDX:", q.pidx);
2822 		R("RspQ ID:", rspq.abs_id);
2823 		R("RspQ size:", rspq.size);
2824 		R("RspQE size:", rspq.iqe_len);
2825 		R("RspQ CIDX:", rspq.cidx);
2826 		R("RspQ Gen:", rspq.gen);
2827 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2828 		S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
2829 		R("FL ID:", fl.cntxt_id);
2830 		S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0);
2831 		R("FL pend:", fl.pend_cred);
2832 		R("FL avail:", fl.avail);
2833 		R("FL PIDX:", fl.pidx);
2834 		R("FL CIDX:", fl.cidx);
2835 		RL("RxPackets:", stats.pkts);
2836 		RL("RxImm:", stats.imm);
2837 		RL("RxAN", stats.an);
2838 		RL("RxNoMem", stats.nomem);
2839 		TL("TSO:", tso);
2840 		TL("USO:", uso);
2841 		TL("TxCSO:", tx_cso);
2842 		TL("VLANins:", vlan_ins);
2843 		TL("TxQFull:", q.stops);
2844 		TL("TxQRestarts:", q.restarts);
2845 		TL("TxMapErr:", mapping_err);
2846 		RL("FLAllocErr:", fl.alloc_failed);
2847 		RL("FLLrgAlcErr:", fl.large_alloc_failed);
2848 		RL("FLMapErr:", fl.mapping_err);
2849 		RL("FLLow:", fl.low);
2850 		RL("FLStarving:", fl.starving);
2851 
2852 		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
2853 		goto out;
2854 	}
2855 
2856 	r -= eohw_entries;
2857 	for (j = 0; j < adap->params.nports; j++) {
2858 		int entries;
2859 		u8 tc;
2860 
2861 		port_mqprio = &adap->tc_mqprio->port_mqprio[j];
2862 		entries = 0;
2863 		for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
2864 			entries += port_mqprio->mqprio.qopt.count[tc];
2865 
2866 		if (!entries)
2867 			continue;
2868 
2869 		eosw_entries = DIV_ROUND_UP(entries, 4);
2870 		if (r < eosw_entries) {
2871 			const struct sge_eosw_txq *tx;
2872 
2873 			n = min(4, entries - 4 * r);
2874 			tx = &port_mqprio->eosw_txq[4 * r];
2875 
2876 			S("QType:", "EOSW-TXQ");
2877 			S("Interface:",
2878 			  adap->port[j] ? adap->port[j]->name : "N/A");
2879 			T("EOTID:", hwtid);
2880 			T("HWQID:", hwqid);
2881 			T("State:", state);
2882 			T("Size:", ndesc);
2883 			T("In-Use:", inuse);
2884 			T("Credits:", cred);
2885 			T("Compl:", ncompl);
2886 			T("Last-Compl:", last_compl);
2887 			T("PIDX:", pidx);
2888 			T("Last-PIDX:", last_pidx);
2889 			T("CIDX:", cidx);
2890 			T("Last-CIDX:", last_cidx);
2891 			T("FLOWC-IDX:", flowc_idx);
2892 
2893 			mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
2894 			goto out;
2895 		}
2896 
2897 		r -= eosw_entries;
2898 	}
2899 	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
2900 
2901 skip_mqprio:
2902 	if (!is_uld(adap))
2903 		goto skip_uld;
2904 
2905 	mutex_lock(&uld_mutex);
2906 	if (s->uld_txq_info)
2907 		for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
2908 			uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
2909 
2910 	if (s->uld_rxq_info) {
2911 		for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
2912 			uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
2913 			uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
2914 		}
2915 	}
2916 
2917 	if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
2918 		const struct sge_uld_txq *tx;
2919 
2920 		utxq_info = s->uld_txq_info[CXGB4_TX_OFLD];
2921 		tx = &utxq_info->uldtxq[r * 4];
2922 		n = min(4, utxq_info->ntxq - 4 * r);
2923 
2924 		S("QType:", "OFLD-TXQ");
2925 		T("TxQ ID:", q.cntxt_id);
2926 		T("TxQ size:", q.size);
2927 		T("TxQ inuse:", q.in_use);
2928 		T("TxQ CIDX:", q.cidx);
2929 		T("TxQ PIDX:", q.pidx);
2930 
2931 		goto unlock;
2932 	}
2933 
2934 	r -= uld_txq_entries[CXGB4_TX_OFLD];
2935 	if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) {
2936 		const struct sge_ofld_rxq *rx;
2937 
2938 		urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
2939 		rx = &urxq_info->uldrxq[r * 4];
2940 		n = min(4, urxq_info->nrxq - 4 * r);
2941 
2942 		S("QType:", "RDMA-CPL");
2943 		S("Interface:",
2944 		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2945 		R("RspQ ID:", rspq.abs_id);
2946 		R("RspQ size:", rspq.size);
2947 		R("RspQE size:", rspq.iqe_len);
2948 		R("RspQ CIDX:", rspq.cidx);
2949 		R("RspQ Gen:", rspq.gen);
2950 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2951 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
2952 		R("FL ID:", fl.cntxt_id);
2953 		R("FL size:", fl.size - 8);
2954 		R("FL pend:", fl.pend_cred);
2955 		R("FL avail:", fl.avail);
2956 		R("FL PIDX:", fl.pidx);
2957 		R("FL CIDX:", fl.cidx);
2958 
2959 		goto unlock;
2960 	}
2961 
2962 	r -= uld_rxq_entries[CXGB4_ULD_RDMA];
2963 	if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) {
2964 		const struct sge_ofld_rxq *rx;
2965 		int ciq_idx = 0;
2966 
2967 		urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
2968 		ciq_idx = urxq_info->nrxq + (r * 4);
2969 		rx = &urxq_info->uldrxq[ciq_idx];
2970 		n = min(4, urxq_info->nciq - 4 * r);
2971 
2972 		S("QType:", "RDMA-CIQ");
2973 		S("Interface:",
2974 		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
2975 		R("RspQ ID:", rspq.abs_id);
2976 		R("RspQ size:", rspq.size);
2977 		R("RspQE size:", rspq.iqe_len);
2978 		R("RspQ CIDX:", rspq.cidx);
2979 		R("RspQ Gen:", rspq.gen);
2980 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
2981 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
2982 
2983 		goto unlock;
2984 	}
2985 
2986 	r -= uld_ciq_entries[CXGB4_ULD_RDMA];
2987 	if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) {
2988 		const struct sge_ofld_rxq *rx;
2989 
2990 		urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI];
2991 		rx = &urxq_info->uldrxq[r * 4];
2992 		n = min(4, urxq_info->nrxq - 4 * r);
2993 
2994 		S("QType:", "iSCSI");
2995 		R("RspQ ID:", rspq.abs_id);
2996 		R("RspQ size:", rspq.size);
2997 		R("RspQE size:", rspq.iqe_len);
2998 		R("RspQ CIDX:", rspq.cidx);
2999 		R("RspQ Gen:", rspq.gen);
3000 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
3001 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
3002 		R("FL ID:", fl.cntxt_id);
3003 		R("FL size:", fl.size - 8);
3004 		R("FL pend:", fl.pend_cred);
3005 		R("FL avail:", fl.avail);
3006 		R("FL PIDX:", fl.pidx);
3007 		R("FL CIDX:", fl.cidx);
3008 
3009 		goto unlock;
3010 	}
3011 
3012 	r -= uld_rxq_entries[CXGB4_ULD_ISCSI];
3013 	if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) {
3014 		const struct sge_ofld_rxq *rx;
3015 
3016 		urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT];
3017 		rx = &urxq_info->uldrxq[r * 4];
3018 		n = min(4, urxq_info->nrxq - 4 * r);
3019 
3020 		S("QType:", "iSCSIT");
3021 		R("RspQ ID:", rspq.abs_id);
3022 		R("RspQ size:", rspq.size);
3023 		R("RspQE size:", rspq.iqe_len);
3024 		R("RspQ CIDX:", rspq.cidx);
3025 		R("RspQ Gen:", rspq.gen);
3026 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
3027 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
3028 		R("FL ID:", fl.cntxt_id);
3029 		R("FL size:", fl.size - 8);
3030 		R("FL pend:", fl.pend_cred);
3031 		R("FL avail:", fl.avail);
3032 		R("FL PIDX:", fl.pidx);
3033 		R("FL CIDX:", fl.cidx);
3034 
3035 		goto unlock;
3036 	}
3037 
3038 	r -= uld_rxq_entries[CXGB4_ULD_ISCSIT];
3039 	if (r < uld_rxq_entries[CXGB4_ULD_TLS]) {
3040 		const struct sge_ofld_rxq *rx;
3041 
3042 		urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS];
3043 		rx = &urxq_info->uldrxq[r * 4];
3044 		n = min(4, urxq_info->nrxq - 4 * r);
3045 
3046 		S("QType:", "TLS");
3047 		R("RspQ ID:", rspq.abs_id);
3048 		R("RspQ size:", rspq.size);
3049 		R("RspQE size:", rspq.iqe_len);
3050 		R("RspQ CIDX:", rspq.cidx);
3051 		R("RspQ Gen:", rspq.gen);
3052 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
3053 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
3054 		R("FL ID:", fl.cntxt_id);
3055 		R("FL size:", fl.size - 8);
3056 		R("FL pend:", fl.pend_cred);
3057 		R("FL avail:", fl.avail);
3058 		R("FL PIDX:", fl.pidx);
3059 		R("FL CIDX:", fl.cidx);
3060 
3061 		goto unlock;
3062 	}
3063 
3064 	r -= uld_rxq_entries[CXGB4_ULD_TLS];
3065 	if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) {
3066 		const struct sge_ofld_rxq *rx;
3067 		const struct sge_uld_txq *tx;
3068 
3069 		utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO];
3070 		urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO];
3071 		tx = &utxq_info->uldtxq[r * 4];
3072 		rx = &urxq_info->uldrxq[r * 4];
3073 		n = min(4, utxq_info->ntxq - 4 * r);
3074 
3075 		S("QType:", "Crypto");
3076 		T("TxQ ID:", q.cntxt_id);
3077 		T("TxQ size:", q.size);
3078 		T("TxQ inuse:", q.in_use);
3079 		T("TxQ CIDX:", q.cidx);
3080 		T("TxQ PIDX:", q.pidx);
3081 		R("RspQ ID:", rspq.abs_id);
3082 		R("RspQ size:", rspq.size);
3083 		R("RspQE size:", rspq.iqe_len);
3084 		R("RspQ CIDX:", rspq.cidx);
3085 		R("RspQ Gen:", rspq.gen);
3086 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
3087 		S3("u", "Intr pktcnt:",	s->counter_val[rx[i].rspq.pktcnt_idx]);
3088 		R("FL ID:", fl.cntxt_id);
3089 		R("FL size:", fl.size - 8);
3090 		R("FL pend:", fl.pend_cred);
3091 		R("FL avail:", fl.avail);
3092 		R("FL PIDX:", fl.pidx);
3093 		R("FL CIDX:", fl.cidx);
3094 
3095 		goto unlock;
3096 	}
3097 
3098 	r -= uld_txq_entries[CXGB4_TX_CRYPTO];
3099 	mutex_unlock(&uld_mutex);
3100 
3101 skip_uld:
3102 	if (r < ctrl_entries) {
3103 		const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
3104 
3105 		n = min(4, adap->params.nports - 4 * r);
3106 
3107 		S("QType:", "Control");
3108 		T("TxQ ID:", q.cntxt_id);
3109 		T("TxQ size:", q.size);
3110 		T("TxQ inuse:", q.in_use);
3111 		T("TxQ CIDX:", q.cidx);
3112 		T("TxQ PIDX:", q.pidx);
3113 		TL("TxQFull:", q.stops);
3114 		TL("TxQRestarts:", q.restarts);
3115 
3116 		goto out;
3117 	}
3118 
3119 	r -= ctrl_entries;
3120 	if (r < 1) {
3121 		const struct sge_rspq *evtq = &s->fw_evtq;
3122 
3123 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
3124 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
3125 		seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
3126 		seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
3127 		seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
3128 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
3129 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
3130 			   qtimer_val(adap, evtq));
3131 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
3132 			   s->counter_val[evtq->pktcnt_idx]);
3133 
3134 		goto out;
3135 	}
3136 
3137 #undef R
3138 #undef RL
3139 #undef T
3140 #undef TL
3141 #undef S
3142 #undef R3
3143 #undef T3
3144 #undef S3
3145 out:
3146 	return 0;
3147 
3148 unlock:
3149 	mutex_unlock(&uld_mutex);
3150 	return 0;
3151 }
3152 
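/* Total number of rows sge_qinfo_show() can produce: each queue group is
 * displayed four queue sets per row (hence all the DIV_ROUND_UP(..., 4)
 * terms), plus one final row for the firmware event queue.
 */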
3153 static int sge_queue_entries(struct adapter *adap)
3154 {
3155 	int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
3156 	int mirror_rxq_entries = 0;
3157 
3158 	if (adap->tc_mqprio) {
3159 		struct cxgb4_tc_port_mqprio *port_mqprio;
3160 		u8 tc;
3161 
3162 		mutex_lock(&adap->tc_mqprio->mqprio_mutex);
3163 		if (adap->sge.eohw_txq)
3164 			eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
3165 
3166 		for (i = 0; i < adap->params.nports; i++) {
3167 			u32 entries = 0;
3168 
3169 			port_mqprio = &adap->tc_mqprio->port_mqprio[i];
3170 			for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
3171 				entries += port_mqprio->mqprio.qopt.count[tc];
3172 
3173 			if (entries)
3174 				eosw_entries += DIV_ROUND_UP(entries, 4);
3175 		}
3176 		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
3177 	}
3178 
3179 	for_each_port(adap, i) {
3180 		struct port_info *pi = adap2pinfo(adap, i);
3181 
3182 		mutex_lock(&pi->vi_mirror_mutex);
3183 		if (pi->vi_mirror_count)
3184 			mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4);
3185 		mutex_unlock(&pi->vi_mirror_mutex);
3186 	}
3187 
3188 	if (!is_uld(adap))
3189 		goto lld_only;
3190 
3191 	mutex_lock(&uld_mutex);
3192 	for (i = 0; i < CXGB4_TX_MAX; i++)
3193 		tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i);
3194 
3195 	for (i = 0; i < CXGB4_ULD_MAX; i++) {
3196 		tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i);
3197 		tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i);
3198 	}
3199 	mutex_unlock(&uld_mutex);
3200 
3201 lld_only:
3202 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries +
3203 	       eohw_entries + eosw_entries + tot_uld_entries +
3204 	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
3205 }
3206 
3207 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
3208 {
3209 	int entries = sge_queue_entries(seq->private);
3210 
3211 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
3212 }
3213 
3214 static void sge_queue_stop(struct seq_file *seq, void *v)
3215 {
3216 }
3217 
3218 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
3219 {
3220 	int entries = sge_queue_entries(seq->private);
3221 
3222 	++*pos;
3223 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
3224 }
3225 
3226 static const struct seq_operations sge_qinfo_seq_ops = {
3227 	.start = sge_queue_start,
3228 	.next  = sge_queue_next,
3229 	.stop  = sge_queue_stop,
3230 	.show  = sge_qinfo_show
3231 };
3232 
3233 static int sge_qinfo_open(struct inode *inode, struct file *file)
3234 {
3235 	int res = seq_open(file, &sge_qinfo_seq_ops);
3236 
3237 	if (!res) {
3238 		struct seq_file *seq = file->private_data;
3239 
3240 		seq->private = inode->i_private;
3241 	}
3242 	return res;
3243 }
3244 
3245 static const struct file_operations sge_qinfo_debugfs_fops = {
3246 	.owner   = THIS_MODULE,
3247 	.open    = sge_qinfo_open,
3248 	.read    = seq_read,
3249 	.llseek  = seq_lseek,
3250 	.release = seq_release,
3251 };
3252 
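/* Several debugfs files encode a memory type in the low three bits of the
 * inode's i_private pointer (see add_debugfs_mem() below, which registers
 * (void *)adap + idx); mem_open() and mem_read() recover the adapter
 * pointer by masking that tag back off.  mem_open() also asks the firmware
 * to flush its cache so subsequent reads see current contents.
 */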
3253 int mem_open(struct inode *inode, struct file *file)
3254 {
3255 	unsigned int mem;
3256 	struct adapter *adap;
3257 
3258 	file->private_data = inode->i_private;
3259 
3260 	mem = (uintptr_t)file->private_data & 0x7;
3261 	adap = file->private_data - mem;
3262 
3263 	(void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
3264 
3265 	return 0;
3266 }
3267 
3268 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3269 			loff_t *ppos)
3270 {
3271 	loff_t pos = *ppos;
3272 	loff_t avail = file_inode(file)->i_size;
3273 	unsigned int mem = (uintptr_t)file->private_data & 0x7;
3274 	struct adapter *adap = file->private_data - mem;
3275 	__be32 *data;
3276 	int ret;
3277 
3278 	if (pos < 0)
3279 		return -EINVAL;
3280 	if (pos >= avail)
3281 		return 0;
3282 	if (count > avail - pos)
3283 		count = avail - pos;
3284 
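	/* Read the requested range through PCIe memory window 0 (under
	 * win0_lock) into a bounce buffer, then copy it to user space.
	 */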
3285 	data = kvzalloc(count, GFP_KERNEL);
3286 	if (!data)
3287 		return -ENOMEM;
3288 
3289 	spin_lock(&adap->win0_lock);
3290 	ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3291 	spin_unlock(&adap->win0_lock);
3292 	if (ret) {
3293 		kvfree(data);
3294 		return ret;
3295 	}
3296 	ret = copy_to_user(buf, data, count);
3297 
3298 	kvfree(data);
3299 	if (ret)
3300 		return -EFAULT;
3301 
3302 	*ppos = pos + count;
3303 	return count;
3304 }
3305 static const struct file_operations mem_debugfs_fops = {
3306 	.owner   = THIS_MODULE,
3307 	.open    = simple_open,
3308 	.read    = mem_read,
3309 	.llseek  = default_llseek,
3310 };
3311 
3312 static int tid_info_show(struct seq_file *seq, void *v)
3313 {
3314 	struct adapter *adap = seq->private;
3315 	const struct tid_info *t;
3316 	enum chip_type chip;
3317 
3318 	t = &adap->tids;
3319 	chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3320 	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
3321 		unsigned int sb;
3322 		seq_printf(seq, "Connections in use: %u\n",
3323 			   atomic_read(&t->conns_in_use));
3324 
3325 		if (chip <= CHELSIO_T5)
3326 			sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
3327 		else
3328 			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
3329 
3330 		if (sb) {
3331 			seq_printf(seq, "TID range: %u..%u/%u..%u", t->tid_base,
3332 				   sb - 1, adap->tids.hash_base,
3333 				   t->tid_base + t->ntids - 1);
3334 			seq_printf(seq, ", in use: %u/%u\n",
3335 				   atomic_read(&t->tids_in_use),
3336 				   atomic_read(&t->hash_tids_in_use));
3337 		} else if (adap->flags & CXGB4_FW_OFLD_CONN) {
3338 			seq_printf(seq, "TID range: %u..%u/%u..%u",
3339 				   t->aftid_base,
3340 				   t->aftid_end,
3341 				   adap->tids.hash_base,
3342 				   t->tid_base + t->ntids - 1);
3343 			seq_printf(seq, ", in use: %u/%u\n",
3344 				   atomic_read(&t->tids_in_use),
3345 				   atomic_read(&t->hash_tids_in_use));
3346 		} else {
3347 			seq_printf(seq, "TID range: %u..%u",
3348 				   adap->tids.hash_base,
3349 				   t->tid_base + t->ntids - 1);
3350 			seq_printf(seq, ", in use: %u\n",
3351 				   atomic_read(&t->hash_tids_in_use));
3352 		}
3353 	} else if (t->ntids) {
3354 		seq_printf(seq, "Connections in use: %u\n",
3355 			   atomic_read(&t->conns_in_use));
3356 
3357 		seq_printf(seq, "TID range: %u..%u", t->tid_base,
3358 			   t->tid_base + t->ntids - 1);
3359 		seq_printf(seq, ", in use: %u\n",
3360 			   atomic_read(&t->tids_in_use));
3361 	}
3362 
3363 	if (t->nstids)
3364 		seq_printf(seq, "STID range: %u..%u, in use-IPv4/IPv6: %u/%u\n",
3365 			   (!t->stid_base &&
3366 			   (chip <= CHELSIO_T5)) ?
3367 			   t->stid_base + 1 : t->stid_base,
3368 			   t->stid_base + t->nstids - 1,
3369 			   t->stids_in_use - t->v6_stids_in_use,
3370 			   t->v6_stids_in_use);
3371 
3372 	if (t->natids)
3373 		seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
3374 			   t->natids - 1, t->atids_in_use);
3375 	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
3376 		   t->ftid_base + t->nftids - 1);
3377 	if (t->nsftids)
3378 		seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
3379 			   t->sftid_base, t->sftid_base + t->nsftids - 2,
3380 			   t->sftids_in_use);
3381 	if (t->nhpftids)
3382 		seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
3383 			   t->hpftid_base + t->nhpftids - 1);
3384 	if (t->neotids)
3385 		seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
3386 			   t->eotid_base, t->eotid_base + t->neotids - 1,
3387 			   atomic_read(&t->eotids_in_use));
3388 	if (t->ntids)
3389 		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
3390 			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
3391 			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
3392 	return 0;
3393 }
3394 DEFINE_SHOW_ATTRIBUTE(tid_info);
3395 
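/* Create a fixed-size debugfs node for one adapter memory target.  The
 * memory index is folded into the low bits of the node's private data
 * pointer and decoded again in mem_read() above.
 */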
static void add_debugfs_mem(struct adapter *adap, const char *name,
			    unsigned int idx, unsigned int size_mb)
{
	debugfs_create_file_size(name, 0400, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops,
				 size_mb << 20);
}

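/* "blocked_fl" exposes a bitmap with one bit per egress queue: reads
 * print it in %pb form, and writes parse a user-supplied bitmap and
 * replace the current one.  It is presumably intended as a debug knob
 * for marking free lists as blocked.
 */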
static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	int len;
	const struct adapter *adap = filp->private_data;
	char *buf;
	ssize_t size = (adap->sge.egr_sz + 3) / 4 +
			adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = snprintf(buf, size - 1, "%*pb\n",
		       adap->sge.egr_sz, adap->sge.blocked_fl);
	len += sprintf(buf + len, "\n");
	size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
	kfree(buf);
	return size;
}

static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	int err;
	unsigned long *t;
	struct adapter *adap = filp->private_data;

	t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
	if (err) {
		kfree(t);
		return err;
	}

	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
	kfree(t);
	return count;
}

static const struct file_operations blocked_fl_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = blocked_fl_read,
	.write   = blocked_fl_write,
	.llseek  = generic_file_llseek,
};

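/* Print one memory region as "name  start-end [human-readable size]". */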
static void mem_region_show(struct seq_file *seq, const char *name,
			    unsigned int from, unsigned int to)
{
	char buf[40];

	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
			sizeof(buf));
	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
}

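/* "meminfo" dumps the adapter memory map collected by cudbg_fill_meminfo():
 * the available EDC/MC/HMA targets, the hardware region table (holes are
 * skipped), uP RAM/Extmem2, and Rx/Tx page, p-struct, port and loopback
 * buffer usage.
 */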
static int meminfo_show(struct seq_file *seq, void *v)
{
	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
					       "MC0:", "MC1:", "HMA:"};
	struct adapter *adap = seq->private;
	struct cudbg_meminfo meminfo;
	int i, rc;

	memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
	rc = cudbg_fill_meminfo(adap, &meminfo);
	if (rc)
		return -ENXIO;

	for (i = 0; i < meminfo.avail_c; i++)
		mem_region_show(seq, memory[meminfo.avail[i].idx],
				meminfo.avail[i].base,
				meminfo.avail[i].limit - 1);

	seq_putc(seq, '\n');
	for (i = 0; i < meminfo.mem_c; i++) {
		if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
			continue;                        /* skip holes */
		if (!meminfo.mem[i].limit)
			meminfo.mem[i].limit =
				i < meminfo.mem_c - 1 ?
				meminfo.mem[i + 1].base - 1 : ~0;
		mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
				meminfo.mem[i].base, meminfo.mem[i].limit);
	}

	seq_putc(seq, '\n');
	mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
	mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
			meminfo.up_extmem2_hi);

	seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
		   meminfo.rx_pages_data[0], meminfo.free_rx_cnt,
		   meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);

	seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
		   meminfo.tx_pages_data[0], meminfo.free_tx_cnt,
		   meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
		   meminfo.tx_pages_data[3]);

	seq_printf(seq, "%u p-structs (%u free)\n\n",
		   meminfo.p_structs, meminfo.p_structs_free_cnt);

	for (i = 0; i < 4; i++)
		/* For T6 these are MAC buffer groups */
		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
			   i, meminfo.port_used[i], meminfo.port_alloc[i]);

	for (i = 0; i < adap->params.arch.nchan; i++)
		/* For T6 these are MAC buffer groups */
		seq_printf(seq,
			   "Loopback %d using %u pages out of %u allocated\n",
			   i, meminfo.loopback_used[i],
			   meminfo.loopback_alloc[i]);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(meminfo);

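/* "crypto" reports the Chelsio crypto accelerator counters.  The inline
 * IPsec and KTLS sections are included only when the corresponding
 * CONFIG_CHELSIO_* options are enabled.
 */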
static int chcr_stats_show(struct seq_file *seq, void *v)
{
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	struct ch_ktls_port_stats_debug *ktls_port;
	int i = 0;
#endif
	struct adapter *adap = seq->private;

	seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
	seq_printf(seq, "Cipher Ops: %10u \n",
		   atomic_read(&adap->chcr_stats.cipher_rqst));
	seq_printf(seq, "Digest Ops: %10u \n",
		   atomic_read(&adap->chcr_stats.digest_rqst));
	seq_printf(seq, "Aead Ops: %10u \n",
		   atomic_read(&adap->chcr_stats.aead_rqst));
	seq_printf(seq, "Completion: %10u \n",
		   atomic_read(&adap->chcr_stats.complete));
	seq_printf(seq, "Error: %10u \n",
		   atomic_read(&adap->chcr_stats.error));
	seq_printf(seq, "Fallback: %10u \n",
		   atomic_read(&adap->chcr_stats.fallback));
	seq_printf(seq, "TLS PDU Tx: %10u\n",
		   atomic_read(&adap->chcr_stats.tls_pdu_tx));
	seq_printf(seq, "TLS PDU Rx: %10u\n",
		   atomic_read(&adap->chcr_stats.tls_pdu_rx));
	seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
		   atomic_read(&adap->chcr_stats.tls_key));
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
	seq_puts(seq, "\nChelsio Inline IPsec Crypto Accelerator Stats\n");
	seq_printf(seq, "IPSec PDU: %10u\n",
		   atomic_read(&adap->ch_ipsec_stats.ipsec_cnt));
#endif
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
	seq_printf(seq, "Tx TLS offload refcount:          %20u\n",
		   refcount_read(&adap->chcr_ktls.ktls_refcount));
	seq_printf(seq, "Tx records send:                  %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
	seq_printf(seq, "Tx partial start of records:      %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_start_pkts));
	seq_printf(seq, "Tx partial middle of records:     %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_middle_pkts));
	seq_printf(seq, "Tx partial end of record:         %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_end_pkts));
	seq_printf(seq, "Tx complete records:              %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
	seq_printf(seq, "TX trim pkts :                    %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
	seq_printf(seq, "TX sw fallback :                  %20llu\n",
		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
	while (i < MAX_NPORTS) {
		ktls_port = &adap->ch_ktls_stats.ktls_port[i];
		seq_printf(seq, "Port %d\n", i);
		seq_printf(seq, "Tx connection created:            %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_open));
		seq_printf(seq, "Tx connection failed:             %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_fail));
		seq_printf(seq, "Tx connection closed:             %20llu\n",
			   atomic64_read(&ktls_port->ktls_tx_connection_close));
		i++;
	}
#endif
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(chcr_stats);

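/* Helpers for tp_stats_show(): PRINT_ADAP_STATS prints one adapter-wide
 * counter, while the PRINT_CH_STATS variants print one counter per
 * channel, indexed either as stats.value[i] or as stats[i].value.
 */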
#define PRINT_ADAP_STATS(string, value) \
	seq_printf(seq, "%-25s %-20llu\n", (string), \
		   (unsigned long long)(value))

#define PRINT_CH_STATS(string, value) \
do { \
	seq_printf(seq, "%-25s ", (string)); \
	for (i = 0; i < adap->params.arch.nchan; i++) \
		seq_printf(seq, "%-20llu ", \
			   (unsigned long long)stats.value[i]); \
	seq_printf(seq, "\n"); \
} while (0)

#define PRINT_CH_STATS2(string, value) \
do { \
	seq_printf(seq, "%-25s ", (string)); \
	for (i = 0; i < adap->params.arch.nchan; i++) \
		seq_printf(seq, "%-20llu ", \
			   (unsigned long long)stats[i].value); \
	seq_printf(seq, "\n"); \
} while (0)

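/* Each show_*_stats() helper below takes stats_lock, snapshots one group
 * of TP counters through the matching t4_* accessor, and prints it with
 * the macros above.
 */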
static void show_tcp_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_tcp_stats v4, v6;

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, &v4, &v6, false);
	spin_unlock(&adap->stats_lock);

	PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
	PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
	PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
	PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
	PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
	PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
	PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
	PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
}

static void show_ddp_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_usm_stats stats;

	spin_lock(&adap->stats_lock);
	t4_get_usm_stats(adap, &stats, false);
	spin_unlock(&adap->stats_lock);

	PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
	PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
	PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
}

static void show_rdma_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_rdma_stats stats;

	spin_lock(&adap->stats_lock);
	t4_tp_get_rdma_stats(adap, &stats, false);
	spin_unlock(&adap->stats_lock);

	PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
	PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
}

static void show_tp_err_adapter_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_err_stats stats;

	spin_lock(&adap->stats_lock);
	t4_tp_get_err_stats(adap, &stats, false);
	spin_unlock(&adap->stats_lock);

	PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
	PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
}

static void show_cpl_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_cpl_stats stats;
	u8 i;

	spin_lock(&adap->stats_lock);
	t4_tp_get_cpl_stats(adap, &stats, false);
	spin_unlock(&adap->stats_lock);

	PRINT_CH_STATS("tp_cpl_requests:", req);
	PRINT_CH_STATS("tp_cpl_responses:", rsp);
}

static void show_tp_err_channel_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_err_stats stats;
	u8 i;

	spin_lock(&adap->stats_lock);
	t4_tp_get_err_stats(adap, &stats, false);
	spin_unlock(&adap->stats_lock);

	PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
	PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
	PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
	PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
	PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
	PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
	PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
	PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
}

static void show_fcoe_stats(struct seq_file *seq)
{
	struct adapter *adap = seq->private;
	struct tp_fcoe_stats stats[NCHAN];
	u8 i;

	spin_lock(&adap->stats_lock);
	for (i = 0; i < adap->params.arch.nchan; i++)
		t4_get_fcoe_stats(adap, i, &stats[i], false);
	spin_unlock(&adap->stats_lock);

	PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
	PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
	PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
}

#undef PRINT_CH_STATS2
#undef PRINT_CH_STATS
#undef PRINT_ADAP_STATS

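/* "tp_stats" combines the adapter-wide and per-channel TP statistics
 * above into a single report.
 */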
static int tp_stats_show(struct seq_file *seq, void *v)
{
	struct adapter *adap = seq->private;

	seq_puts(seq, "\n--------Adapter Stats--------\n");
	show_tcp_stats(seq);
	show_ddp_stats(seq);
	show_rdma_stats(seq);
	show_tp_err_adapter_stats(seq);

	seq_puts(seq, "\n-------- Channel Stats --------\n");
	if (adap->params.arch.nchan == NCHAN)
		seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
			   " ", "channel 0", "channel 1",
			   "channel 2", "channel 3");
	else
		seq_printf(seq, "%-25s %-20s %-20s\n",
			   " ", "channel 0", "channel 1");
	show_cpl_stats(seq);
	show_tp_err_channel_stats(seq);
	show_fcoe_stats(seq);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tp_stats);

/* Add an array of Debug FS files.
 */
void add_debugfs_files(struct adapter *adap,
		       struct t4_debugfs_entry *files,
		       unsigned int nfiles)
{
	int i;

	/* debugfs support is best effort */
	for (i = 0; i < nfiles; i++)
		debugfs_create_file(files[i].name, files[i].mode,
				    adap->debugfs_root,
				    (void *)adap + files[i].data,
				    files[i].ops);
}

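/* Populate the adapter's debugfs directory: the common nodes, the extra
 * OBQ nodes on T5 and later, per-memory-target dump files sized from the
 * MA BAR registers, the flash image, and a couple of boolean knobs.
 */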
int t4_setup_debugfs(struct adapter *adap)
{
	int i;
	u32 size = 0;

	static struct t4_debugfs_entry t4_debugfs_files[] = {
		{ "cim_la", &cim_la_fops, 0400, 0 },
		{ "cim_pif_la", &cim_pif_la_fops, 0400, 0 },
		{ "cim_ma_la", &cim_ma_la_fops, 0400, 0 },
		{ "cim_qcfg", &cim_qcfg_fops, 0400, 0 },
		{ "clk", &clk_fops, 0400, 0 },
		{ "devlog", &devlog_fops, 0400, 0 },
		{ "mboxlog", &mboxlog_fops, 0400, 0 },
		{ "mbox0", &mbox_debugfs_fops, 0600, 0 },
		{ "mbox1", &mbox_debugfs_fops, 0600, 1 },
		{ "mbox2", &mbox_debugfs_fops, 0600, 2 },
		{ "mbox3", &mbox_debugfs_fops, 0600, 3 },
		{ "mbox4", &mbox_debugfs_fops, 0600, 4 },
		{ "mbox5", &mbox_debugfs_fops, 0600, 5 },
		{ "mbox6", &mbox_debugfs_fops, 0600, 6 },
		{ "mbox7", &mbox_debugfs_fops, 0600, 7 },
		{ "trace0", &mps_trc_debugfs_fops, 0600, 0 },
		{ "trace1", &mps_trc_debugfs_fops, 0600, 1 },
		{ "trace2", &mps_trc_debugfs_fops, 0600, 2 },
		{ "trace3", &mps_trc_debugfs_fops, 0600, 3 },
		{ "l2t", &t4_l2t_fops, 0400, 0},
		{ "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 },
		{ "rss", &rss_debugfs_fops, 0400, 0 },
		{ "rss_config", &rss_config_fops, 0400, 0 },
		{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
		{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
		{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
		{ "resources", &resources_fops, 0400, 0 },
#ifdef CONFIG_CHELSIO_T4_DCB
		{ "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
#endif
		{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
		{ "ibq_tp0",  &cim_ibq_fops, 0400, 0 },
		{ "ibq_tp1",  &cim_ibq_fops, 0400, 1 },
		{ "ibq_ulp",  &cim_ibq_fops, 0400, 2 },
		{ "ibq_sge0", &cim_ibq_fops, 0400, 3 },
		{ "ibq_sge1", &cim_ibq_fops, 0400, 4 },
		{ "ibq_ncsi", &cim_ibq_fops, 0400, 5 },
		{ "obq_ulp0", &cim_obq_fops, 0400, 0 },
		{ "obq_ulp1", &cim_obq_fops, 0400, 1 },
		{ "obq_ulp2", &cim_obq_fops, 0400, 2 },
		{ "obq_ulp3", &cim_obq_fops, 0400, 3 },
		{ "obq_sge",  &cim_obq_fops, 0400, 4 },
		{ "obq_ncsi", &cim_obq_fops, 0400, 5 },
		{ "tp_la", &tp_la_fops, 0400, 0 },
		{ "ulprx_la", &ulprx_la_fops, 0400, 0 },
		{ "sensors", &sensors_fops, 0400, 0 },
		{ "pm_stats", &pm_stats_debugfs_fops, 0400, 0 },
		{ "tx_rate", &tx_rate_fops, 0400, 0 },
		{ "cctrl", &cctrl_tbl_fops, 0400, 0 },
#if IS_ENABLED(CONFIG_IPV6)
		{ "clip_tbl", &clip_tbl_fops, 0400, 0 },
#endif
		{ "tids", &tid_info_fops, 0400, 0},
		{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
		{ "meminfo", &meminfo_fops, 0400, 0 },
		{ "crypto", &chcr_stats_fops, 0400, 0 },
		{ "tp_stats", &tp_stats_fops, 0400, 0 },
	};

	/* Debug FS nodes common to all T5 and later adapters.
	 */
	static struct t4_debugfs_entry t5_debugfs_files[] = {
		{ "obq_sge_rx_q0", &cim_obq_fops, 0400, 6 },
		{ "obq_sge_rx_q1", &cim_obq_fops, 0400, 7 },
	};

	add_debugfs_files(adap,
			  t4_debugfs_files,
			  ARRAY_SIZE(t4_debugfs_files));
	if (!is_t4(adap->params.chip))
		add_debugfs_files(adap,
				  t5_debugfs_files,
				  ARRAY_SIZE(t5_debugfs_files));

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
	if (i & EDRAM0_ENABLE_F) {
		size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
		add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size));
	}
	if (i & EDRAM1_ENABLE_F) {
		size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
	}
	if (is_t5(adap->params.chip)) {
		if (i & EXT_MEM0_ENABLE_F) {
			size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			add_debugfs_mem(adap, "mc0", MEM_MC0,
					EXT_MEM0_SIZE_G(size));
		}
		if (i & EXT_MEM1_ENABLE_F) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			add_debugfs_mem(adap, "mc1", MEM_MC1,
					EXT_MEM1_SIZE_G(size));
		}
	} else {
		if (i & EXT_MEM_ENABLE_F) {
			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
			add_debugfs_mem(adap, "mc", MEM_MC,
					EXT_MEM_SIZE_G(size));
		}

		if (i & HMA_MUX_F) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			add_debugfs_mem(adap, "hma", MEM_HMA,
					EXT_MEM1_SIZE_G(size));
		}
	}

	debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
				 &flash_debugfs_fops, adap->params.sf_size);
	debugfs_create_bool("use_backdoor", 0600,
			    adap->debugfs_root, &adap->use_bd);
	debugfs_create_bool("trace_rss", 0600,
			    adap->debugfs_root, &adap->trace_rss);

	return 0;
}