• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/vmalloc.h>
9 #include <linux/crc32.h>
10 #include "qed.h"
11 #include "qed_cxt.h"
12 #include "qed_hsi.h"
13 #include "qed_hw.h"
14 #include "qed_mcp.h"
15 #include "qed_reg_addr.h"
16 
/* Memory groups enum.
 * NOTE: the order and count must be kept in sync with s_mem_group_names
 * below - each enumerator is used as an index into that array.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* Must be last - number of memory groups */
};
51 
/* Memory groups names - indexed by enum mem_groups; the order and the
 * number of entries (MEM_GROUPS_NUM) must match the enum above.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
85 
86 /* Idle check conditions */
87 
/* Idle check condition: both masked registers differ from their
 * expected immediate values.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 first_mismatch = (r[0] & imm[0]) != imm[1];
	u32 second_mismatch = (r[1] & imm[2]) != imm[3];

	return first_mismatch && second_mismatch;
}
92 
/* Idle check condition: an extracted bit-field of reg0 differs from the
 * expected immediate value.
 */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
97 
/* Idle check condition: masked reg0 differs from the expected
 * immediate value.
 */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
102 
/* Idle check condition: a field of reg0 differs from a value assembled
 * from two other fields (low part from reg0, high part from reg1).
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 lo = (r[0] & imm[2]) >> imm[3];
	u32 hi = (r[1] & imm[4]) << imm[5];

	return lhs != (lo | hi);
}
108 
/* Idle check condition: a shifted field of reg0 differs from another
 * masked field of the same register.
 */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 shifted_field = (r[0] & imm[0]) >> imm[1];
	u32 plain_field = r[0] & imm[2];

	return shifted_field != plain_field;
}
113 
/* Idle check condition: reg0 with the immediate mask bits cleared
 * differs from the expected value.
 */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 cleared = r[0] & ~imm[0];

	return cleared != imm[1];
}
118 
/* Idle check condition: reg0 with the reg1 bits cleared differs from
 * the expected immediate value.
 */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 cleared = r[0] & ~r[1];

	return cleared != imm[0];
}
123 
/* Idle check condition: reg0 differs from the expected immediate value */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] == imm[0]) ? 0 : 1;
}
128 
/* Idle check condition: reg0 and reg1 differ while reg2 equals the
 * immediate value.
 */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
133 
/* Idle check condition: reg0 and reg1 differ while reg2 exceeds the
 * immediate value.
 */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
138 
/* Idle check condition: the two registers differ */
static u32 cond3(const u32 *r, const u32 *imm)
{
	return (r[0] == r[1]) ? 0 : 1;
}
143 
/* Idle check condition: any of the immediate mask bits is set in reg0.
 * Note: returns the raw masked value (any non-zero means "condition
 * holds"), not a normalized 0/1.
 */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked;
}
148 
/* Idle check condition: reg0 is below reg1 minus the immediate value.
 * Arithmetic is unsigned, matching the original expression.
 */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return r[0] < threshold;
}
153 
/* Idle check condition: reg0 exceeds the immediate value */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return (r[0] <= imm[0]) ? 0 : 1;
}
158 
/* Array of Idle Check conditions - indexed by the condition ID used by the
 * idle-check rules, so the order must not be changed.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};
176 
/* Number of physical HW blocks (presumably per device) - TODO confirm */
#define NUM_PHYS_BLOCKS 84

/* Number of debug reset registers */
#define NUM_DBG_RESET_REGS 8
180 
181 /******************************* Data Types **********************************/
182 
/* HW types. Presumably also used to index s_hw_type_defs - verify before
 * using the reserved values for lookups.
 */
enum hw_types {
	HW_TYPE_ASIC,		/* Real silicon - the only type set up here */
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};
191 
/* CM context types - used to index per-Storm context registers/sizes
 * (see struct storm_defs cm_ctx_rd_addr / cm_ctx_lid_sizes).
 */
enum cm_ctx_types {
	CM_CTX_CONN_AG,		/* Connection aggregation context */
	CM_CTX_CONN_ST,		/* Connection storm context */
	CM_CTX_TASK_AG,		/* Task aggregation context */
	CM_CTX_TASK_ST,		/* Task storm context */
	NUM_CM_CTX_TYPES
};
200 
/* Debug bus frame modes - how the debug bus dwords are split between
 * Storm and HW sources.
 */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};
210 
/* Chip constant definitions */
struct chip_defs {
	const char *name;	/* Chip name string (e.g. "bb", "ah") */
	u32 num_ilt_pages;	/* Number of ILT pages */
};
216 
/* HW type constant definitions */
struct hw_type_defs {
	const char *name;	/* HW type name string */
	u32 delay_factor;	/* Scaling factor for delays on this HW type */
	u32 dmae_thresh;	/* Threshold for using DMAE - TODO confirm units */
	u32 log_thresh;		/* Logging threshold - TODO confirm units */
};
224 
/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;		/* GRC address of the reset register */
	u32 reset_val[MAX_CHIP_IDS];	/* Per-chip reset value */
};
230 
/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;			/* Storm letter (T/M/U/X/Y/P) */
	enum block_id sem_block_id;	/* SEM block of this Storm */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;			/* True if the Storm has a VFC */
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	/* Per-context-type read address and per-chip LID sizes */
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};
250 
/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;	/* Operation value programmed to HW */
	bool is_cyclic;	/* True if the operation is cyclic */
};
256 
/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;		/* Mode name string */
	bool is_fast_dbg;		/* True if a fast debug mode */
	u8 id_in_hw;			/* Mode ID as programmed to HW */
	u32 src_disable_reg_addr;	/* Register to disable the source */
	u32 src_enable_val;		/* Value enabling the source */
	bool exists[MAX_CHIP_IDS];	/* Per-chip availability */
};
266 
/* Per-GRC-parameter constant definitions (see s_grc_param_defs) */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* Per-chip default value */
	u32 min;			/* Minimum allowed value */
	u32 max;			/* Maximum allowed value */
	bool is_preset;			/* True if this param is a preset */
	bool is_persistent;		/* True if not reset to default */
	u32 exclude_all_preset_val;	/* Value under EXCLUDE_ALL preset */
	u32 crash_preset_val[MAX_CHIP_IDS]; /* Per-chip CRASH preset value */
};
276 
/* RSS memory definitions. Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;		/* Memory name string */
	const char *type_name;		/* Entry type name string */
	u32 addr;			/* Base address, in 128b units */
	u32 entry_width;		/* Entry width, in bits */
	u32 num_entries[MAX_CHIP_IDS];	/* Per-chip number of entries */
};
285 
/* VFC RAM region definitions (see s_vfc_ram_defs) */
struct vfc_ram_defs {
	const char *mem_name;	/* Memory name string */
	const char *type_name;	/* Entry type name string */
	u32 base_row;		/* First RAM row of the region */
	u32 num_rows;		/* Number of rows in the region */
};
292 
/* Big RAM (BRB/BTB/BMB) definitions */
struct big_ram_defs {
	const char *instance_name;		/* Instance name (e.g. "BRB") */
	enum mem_groups mem_group_id;		/* Memory group of the block */
	enum mem_groups ram_mem_group_id;	/* Memory group of the RAM */
	enum dbg_grc_params grc_param;		/* GRC param gating the dump */
	u32 addr_reg_addr;			/* Indirect address register */
	u32 data_reg_addr;			/* Indirect data register */
	u32 is_256b_reg_addr;			/* 256b-lines indication reg */
	u32 is_256b_bit_offset[MAX_CHIP_IDS];	/* Per-chip bit offset */
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
304 
/* PHY definitions - indirect TBUS access layout per PHY instance */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};
323 
/* Split type definitions (see s_split_type_defs) */
struct split_type_defs {
	const char *name;	/* Split type name string */
};
328 
329 /******************************** Constants **********************************/
330 
#define BYTES_IN_DWORD			sizeof(u32)
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
/* NOTE(review): uses a signed "1 <<"; assumes field sizes < 32 bits -
 * a 32-bit field would be undefined behavior. TODO confirm no such field.
 */
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Read-modify-write of a bit-field inside a dword array.
 * NOTE(review): (val) is not masked to the field size - callers must pass
 * values that fit in FIELD_BIT_SIZE bits, or neighboring fields get
 * corrupted.
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &=	\
		(~FIELD_BIT_MASK(type, field));	\
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* Writes arr_size dwords from arr to the same GRC address.
 * NOTE(review): relies on a loop variable 'i' declared at the call site.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr,	(arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

/* Each RAM line holds 2 dwords */
#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_NAME_LEN		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET	4

#define STALL_DELAY_MS			500

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	9

#define MAX_RECURSION_DEPTH		10

#define FW_IMG_MAIN			1

#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

/* NOTE(review): "PLTAFORM" is a pre-existing typo of "PLATFORM"; renaming
 * would require updating all users elsewhere in the file, so it is kept.
 */
#define MAX_SW_PLTAFORM_STR_SIZE	64

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
464 
465 /***************************** Constant Arrays *******************************/
466 
/* Chip constant definitions array - indexed by enum chip_ids
 * (CHIP_BB / CHIP_K2, see qed_dbg_dev_init).
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
	{"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
472 
/* Storm constant definitions array - one entry per Storm, in the field
 * order of struct storm_defs. Per-chip pairs are ordered {bb, k2}.
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
		true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
		TCM_REG_CTX_RBC_ACCS,
		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
		 TCM_REG_SM_TASK_CTX},
		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
		false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2,
		MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2,
		MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY,
		MSEM_REG_DBG_GPRE_VECT,
		MCM_REG_CTX_RBC_ACCS,
		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
		 MCM_REG_SM_TASK_CTX },
		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
		false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2,
		USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2,
		USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY,
		USEM_REG_DBG_GPRE_VECT,
		UCM_REG_CTX_RBC_ACCS,
		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
		 UCM_REG_SM_TASK_CTX},
		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
		false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2,
		XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2,
		XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY,
		XSEM_REG_DBG_GPRE_VECT,
		XCM_REG_CTX_RBC_ACCS,
		/* Xstorm has no task contexts */
		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
		false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2,
		YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2,
		YSEM_REG_DBG_MODE1_CFG_BB_K2,
		YSEM_REG_SYNC_DBG_EMPTY,
		YSEM_REG_DBG_GPRE_VECT,
		YCM_REG_CTX_RBC_ACCS,
		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
		 YCM_REG_SM_TASK_CTX},
		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
		true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2,
		PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2,
		PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY,
		PSEM_REG_DBG_GPRE_VECT,
		PCM_REG_CTX_RBC_ACCS,
		/* Pstorm has only an SM connection context */
		{0, PCM_REG_SM_CON_CTX, 0, 0},
		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
	},
};
572 
573 static struct hw_type_defs s_hw_type_defs[] = {
574 	/* HW_TYPE_ASIC */
575 	{"asic", 1, 256, 32768},
576 	{"reserved", 0, 0, 0},
577 	{"reserved2", 0, 0, 0},
578 	{"reserved3", 0, 0, 0}
579 };
580 
/* GRC parameter definitions array, indexed by enum dbg_grc_params.
 * Per-entry field order (see struct grc_param_defs):
 * {default_val per chip}, min, max, is_preset, is_persistent,
 * exclude_all_preset_val, {crash_preset_val per chip}
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false, false, 0, {0, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
720 
/* RSS memory definitions array. Per-entry field order (see struct
 * rss_mem_defs): mem_name, type_name, addr (128b units),
 * entry_width (bits), {num_entries per chip}.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208}},

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208}},

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624}}
};
737 
/* VFC RAM regions. Per-entry field order (see struct vfc_ram_defs):
 * mem_name, type_name, base_row, num_rows.
 */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};
744 
/* Big RAM definitions array (BRB/BTB/BMB). Per-entry field order matches
 * struct big_ram_defs; ram_size values are in dwords.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};
761 
/* RBC reset registers and their per-chip {bb, k2} reset values */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};
772 
/* PHY definitions array. Per-entry field order (see struct phy_defs):
 * phy_name, base_addr, tbus_addr_lo, tbus_addr_hi, tbus_data_lo,
 * tbus_data_hi. Register names indicate these are K2/E5-only PHYs.
 */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
795 
/* Split type names - indexed by enum dbg_split_type values */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},	/* NOTE(review): same name as SPLIT_TYPE_PORT - presumably intentional; verify against dump parsers */

	/* SPLIT_TYPE_VF */
	{"vf"}
};
812 
813 /**************************** Private Functions ******************************/
814 
815 /* Reads and returns a single dword from the specified unaligned buffer */
qed_read_unaligned_dword(u8 * buf)816 static u32 qed_read_unaligned_dword(u8 *buf)
817 {
818 	u32 dword;
819 
820 	memcpy((u8 *)&dword, buf, sizeof(dword));
821 	return dword;
822 }
823 
824 /* Sets the value of the specified GRC param */
qed_grc_set_param(struct qed_hwfn * p_hwfn,enum dbg_grc_params grc_param,u32 val)825 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
826 			      enum dbg_grc_params grc_param, u32 val)
827 {
828 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
829 
830 	dev_data->grc.param_val[grc_param] = val;
831 }
832 
833 /* Returns the value of the specified GRC param */
qed_grc_get_param(struct qed_hwfn * p_hwfn,enum dbg_grc_params grc_param)834 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
835 			     enum dbg_grc_params grc_param)
836 {
837 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
838 
839 	return dev_data->grc.param_val[grc_param];
840 }
841 
842 /* Initializes the GRC parameters */
qed_dbg_grc_init_params(struct qed_hwfn * p_hwfn)843 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
844 {
845 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
846 
847 	if (!dev_data->grc.params_initialized) {
848 		qed_dbg_grc_set_params_default(p_hwfn);
849 		dev_data->grc.params_initialized = 1;
850 	}
851 }
852 
853 /* Sets pointer and size for the specified binary buffer type */
qed_set_dbg_bin_buf(struct qed_hwfn * p_hwfn,enum bin_dbg_buffer_type buf_type,const u32 * ptr,u32 size)854 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
855 				enum bin_dbg_buffer_type buf_type,
856 				const u32 *ptr, u32 size)
857 {
858 	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
859 
860 	buf->ptr = (void *)ptr;
861 	buf->size = size;
862 }
863 
864 /* Initializes debug data for the specified device */
qed_dbg_dev_init(struct qed_hwfn * p_hwfn)865 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
866 {
867 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
868 	u8 num_pfs = 0, max_pfs_per_port = 0;
869 
870 	if (dev_data->initialized)
871 		return DBG_STATUS_OK;
872 
873 	/* Set chip */
874 	if (QED_IS_K2(p_hwfn->cdev)) {
875 		dev_data->chip_id = CHIP_K2;
876 		dev_data->mode_enable[MODE_K2] = 1;
877 		dev_data->num_vfs = MAX_NUM_VFS_K2;
878 		num_pfs = MAX_NUM_PFS_K2;
879 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
880 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
881 		dev_data->chip_id = CHIP_BB;
882 		dev_data->mode_enable[MODE_BB] = 1;
883 		dev_data->num_vfs = MAX_NUM_VFS_BB;
884 		num_pfs = MAX_NUM_PFS_BB;
885 		max_pfs_per_port = MAX_NUM_PFS_BB;
886 	} else {
887 		return DBG_STATUS_UNKNOWN_CHIP;
888 	}
889 
890 	/* Set HW type */
891 	dev_data->hw_type = HW_TYPE_ASIC;
892 	dev_data->mode_enable[MODE_ASIC] = 1;
893 
894 	/* Set port mode */
895 	switch (p_hwfn->cdev->num_ports_in_engine) {
896 	case 1:
897 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
898 		break;
899 	case 2:
900 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
901 		break;
902 	case 4:
903 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
904 		break;
905 	}
906 
907 	/* Set 100G mode */
908 	if (QED_IS_CMT(p_hwfn->cdev))
909 		dev_data->mode_enable[MODE_100G] = 1;
910 
911 	/* Set number of ports */
912 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
913 	    dev_data->mode_enable[MODE_100G])
914 		dev_data->num_ports = 1;
915 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
916 		dev_data->num_ports = 2;
917 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
918 		dev_data->num_ports = 4;
919 
920 	/* Set number of PFs per port */
921 	dev_data->num_pfs_per_port = min_t(u32,
922 					   num_pfs / dev_data->num_ports,
923 					   max_pfs_per_port);
924 
925 	/* Initializes the GRC parameters */
926 	qed_dbg_grc_init_params(p_hwfn);
927 
928 	dev_data->use_dmae = true;
929 	dev_data->initialized = 1;
930 
931 	return DBG_STATUS_OK;
932 }
933 
get_dbg_block(struct qed_hwfn * p_hwfn,enum block_id block_id)934 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
935 					     enum block_id block_id)
936 {
937 	const struct dbg_block *dbg_block;
938 
939 	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
940 	return dbg_block + block_id;
941 }
942 
qed_get_dbg_block_per_chip(struct qed_hwfn * p_hwfn,enum block_id block_id)943 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
944 							       *p_hwfn,
945 							       enum block_id
946 							       block_id)
947 {
948 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
949 
950 	return (const struct dbg_block_chip *)
951 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
952 	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
953 }
954 
/* Returns the chip-specific reset register data for the specified
 * reset register ID. The binary array is laid out as [reg_id][chip_id].
 */
static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
							 *p_hwfn,
							 u8 reset_reg_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_reset_reg *reset_regs;

	reset_regs = (const struct dbg_reset_reg *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr;

	return &reset_regs[reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id];
}
965 
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * On any failure (e.g. invalid size read from RAM), fw_info is left
 * zeroed and no error is reported.
 */
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* qed_rd() fetches data in CPU byteorder. Swap it back to
	 * the device's to get right structure layout.
	 */
	cpu_to_le32_array(dest, size);

	/* Read FW version info from Storm RAM.
	 * Sanity-check the size before using it: zero or larger than the
	 * destination struct means the location record is invalid.
	 */
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* Same byteorder fixup as above, now for the fw_info payload */
	cpu_to_le32_array(dest, size);
}
1012 
1013 /* Dumps the specified string to the specified buffer.
1014  * Returns the dumped size in bytes.
1015  */
qed_dump_str(char * dump_buf,bool dump,const char * str)1016 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1017 {
1018 	if (dump)
1019 		strcpy(dump_buf, str);
1020 
1021 	return (u32)strlen(str) + 1;
1022 }
1023 
1024 /* Dumps zeros to align the specified buffer to dwords.
1025  * Returns the dumped size in bytes.
1026  */
qed_dump_align(char * dump_buf,bool dump,u32 byte_offset)1027 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1028 {
1029 	u8 offset_in_dword, align_size;
1030 
1031 	offset_in_dword = (u8)(byte_offset & 0x3);
1032 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1033 
1034 	if (dump && align_size)
1035 		memset(dump_buf, 0, align_size);
1036 
1037 	return align_size;
1038 }
1039 
1040 /* Writes the specified string param to the specified buffer.
1041  * Returns the dumped size in dwords.
1042  */
qed_dump_str_param(u32 * dump_buf,bool dump,const char * param_name,const char * param_val)1043 static u32 qed_dump_str_param(u32 *dump_buf,
1044 			      bool dump,
1045 			      const char *param_name, const char *param_val)
1046 {
1047 	char *char_buf = (char *)dump_buf;
1048 	u32 offset = 0;
1049 
1050 	/* Dump param name */
1051 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1052 
1053 	/* Indicate a string param value */
1054 	if (dump)
1055 		*(char_buf + offset) = 1;
1056 	offset++;
1057 
1058 	/* Dump param value */
1059 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1060 
1061 	/* Align buffer to next dword */
1062 	offset += qed_dump_align(char_buf + offset, dump, offset);
1063 
1064 	return BYTES_TO_DWORDS(offset);
1065 }
1066 
1067 /* Writes the specified numeric param to the specified buffer.
1068  * Returns the dumped size in dwords.
1069  */
qed_dump_num_param(u32 * dump_buf,bool dump,const char * param_name,u32 param_val)1070 static u32 qed_dump_num_param(u32 *dump_buf,
1071 			      bool dump, const char *param_name, u32 param_val)
1072 {
1073 	char *char_buf = (char *)dump_buf;
1074 	u32 offset = 0;
1075 
1076 	/* Dump param name */
1077 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1078 
1079 	/* Indicate a numeric param value */
1080 	if (dump)
1081 		*(char_buf + offset) = 0;
1082 	offset++;
1083 
1084 	/* Align buffer to next dword */
1085 	offset += qed_dump_align(char_buf + offset, dump, offset);
1086 
1087 	/* Dump param value (and change offset from bytes to dwords) */
1088 	offset = BYTES_TO_DWORDS(offset);
1089 	if (dump)
1090 		*(dump_buf + offset) = param_val;
1091 	offset++;
1092 
1093 	return offset;
1094 }
1095 
/* Reads the FW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * Emits three params: "fw-version" (string "maj_min_rev_eng"), "fw-image"
 * (string) and "fw-timestamp" (numeric). When the NO_FW_VER GRC param is
 * set, or when not actually dumping, placeholder strings and a zeroed
 * fw_info are used instead of touching the chip.
 */
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	/* 16 bytes exactly fits the worst case "255_255_255_255" + NUL */
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	u32 offset = 0;

	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW info from chip */
		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);

		/* Create FW version/image strings */
		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
			     "%d_%d_%d_%d", fw_info.ver.num.major,
			     fw_info.ver.num.minor, fw_info.ver.num.rev,
			     fw_info.ver.num.eng) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid FW version string\n");
		switch (fw_info.ver.image_id) {
		case FW_IMG_MAIN:
			strcpy(fw_img_str, "main");
			break;
		default:
			strcpy(fw_img_str, "unknown");
			break;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
1139 
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * The MFW version is fetched by walking MCP shared memory: shared-mem base
 * -> public-data section table -> global section -> mfw_ver field. When
 * NO_FW_VER is set (or dump is false), the placeholder version string is
 * dumped instead.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = qed_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		/* offsize value is in dwords, hence the * 4 */
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param - one byte per version component,
		 * most significant byte first
		 */
		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
1188 
/* Reads the chip revision from the chip and writes it as a param to the
 * specified buffer. Returns the dumped size in dwords.
 *
 * The revision is encoded as a two-character string, e.g. "a0" = rev 0
 * metal 0. On non-ASIC platforms (emulation/FPGA) the revision registers
 * are not read and "??" is dumped instead.
 */
static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char param_str[3] = "??";

	if (dev_data->hw_type == HW_TYPE_ASIC) {
		u32 chip_rev, chip_metal;

		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);

		/* chip_rev maps to a letter ('a', 'b', ...), chip_metal to a
		 * digit; assumes both register values are small - NOTE
		 * (review): out-of-range HW values would yield non-printable
		 * chars, confirm register width if that matters.
		 */
		param_str[0] = 'a' + (u8)chip_rev;
		param_str[1] = '0' + (u8)chip_metal;
	}

	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
}
1211 
/* Writes a section header to the specified buffer.
 * A section header is simply a numeric param whose name is the section name
 * and whose value is the number of params in the section.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
1220 
/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * Dumps the "global_params" section header followed by the common params
 * (fw/mfw versions, chip revision, tools version, chip/platform names,
 * pci-func, and - on BB chips only - the path ID), plus room for the
 * caller's num_specific_global_params extra params.
 */
static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 num_params;

	/* Dump global params section header. The "path" param is only
	 * dumped for BB, hence the conditional extra count.
	 */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
		(dev_data->chip_id == CHIP_BB ? 1 : 0);
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "global_params", num_params);

	/* Store params */
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn,
					 p_ptt, dump_buf + offset, dump);
	offset += qed_dump_chip_revision_param(p_hwfn,
					       p_ptt, dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "chip",
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "platform",
				     s_hw_type_defs[dev_data->hw_type].name);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "pci-func", p_hwfn->abs_pf_id);
	if (dev_data->chip_id == CHIP_BB)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "path", QED_PATH_ID(p_hwfn));

	return offset;
}
1264 
/* Writes the "last" section (including CRC) to the specified buffer at the
 * given offset. Returns the dumped size in dwords.
 *
 * The CRC32 covers everything dumped so far (dwords 0..offset, which
 * includes the "last" section header itself) and is stored in the dword
 * immediately following the header.
 */
static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
	u32 start_offset = offset;

	/* Dump CRC section header */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	/* Calculate CRC32 and add it to the dword after the "last" section.
	 * Seed 0xffffffff + final inversion matches the standard CRC-32
	 * convention of the kernel crc32() helper.
	 */
	if (dump)
		*(dump_buf + offset) = ~crc32(0xffffffff,
					      (u8 *)dump_buf,
					      DWORDS_TO_BYTES(offset));

	offset++;

	return offset - start_offset;
}
1285 
/* Update blocks reset state: reads all debug reset registers once, then
 * refreshes dev_data->block_in_reset[] for every physical block that has a
 * reset register (a block is "in reset" when its bit in the register is 0).
 * Removed registers/blocks are skipped and their cached state is left
 * untouched.
 */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
	u8 rst_reg_id;
	u32 blk_id;

	/* Read reset registers */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;
		u32 rst_reg_addr;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		/* Register address field is in dwords */
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		if (!rst_reg_removed)
			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
						     rst_reg_addr);
	}

	/* Check if blocks are in reset */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;
		bool has_rst_reg;
		bool is_removed;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		/* A cleared bit means the block is held in reset */
		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      BIT(blk->reset_reg_bit_offset));
	}
}
1329 
/* is_mode_match recursive function.
 *
 * Evaluates the prefix-encoded mode expression tree stored in the
 * BIN_BUF_DBG_MODE_TREE binary buffer. *modes_buf_offset is advanced past
 * every tree element consumed (including those of sub-expressions), so on
 * return it points just past the evaluated expression. Recursion is bounded
 * by MAX_RECURSION_DEPTH to guard against a corrupt buffer; on overflow the
 * function logs and returns false.
 */
static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 *dbg_array;
	bool arg1, arg2;
	u8 tree_val;

	if (rec_depth > MAX_RECURSION_DEPTH) {
		DP_NOTICE(p_hwfn,
			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
		return false;
	}

	/* Get next element from modes tree buffer */
	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	tree_val = dbg_array[(*modes_buf_offset)++];

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset, rec_depth + 1);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		/* Both operands must be consumed even if the first already
		 * decides the result, to keep the buffer offset in sync.
		 */
		arg1 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		arg2 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	default:
		/* Leaf: value is a mode index biased by MAX_INIT_MODE_OPS */
		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
	}
}
1365 
/* Returns true if the mode (specified using modes_buf_offset) is enabled.
 * *modes_buf_offset is advanced past the evaluated mode expression.
 */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
1371 
1372 /* Enable / disable the Debug block */
qed_bus_enable_dbg_block(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,bool enable)1373 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1374 				     struct qed_ptt *p_ptt, bool enable)
1375 {
1376 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1377 }
1378 
/* Resets the Debug block by pulsing its bit in the associated reset
 * register: the bit is first cleared (assert reset) and then the original
 * register value is restored (deassert). The write order is significant.
 */
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	const struct dbg_reset_reg *reset_reg;
	const struct dbg_block_chip *block;

	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
	/* Address field is stored in dwords */
	reset_reg_addr =
	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));

	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
	new_reset_reg_val =
	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);

	/* Assert reset (bit cleared), then restore the previous value */
	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}
1399 
/* Enable / disable Debug Bus clients according to the specified mask
 * (1 = enable, 0 = disable). The mask is written as-is to the client
 * enable register.
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
1408 
/* Configures one debug-bus line in the given block: selects the line,
 * sets the dword-enable mask, the right-shift amount, and the force-valid /
 * force-frame masks. All per-block register addresses are stored in dwords
 * and converted to byte addresses before writing.
 */
static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum block_id block_id,
				    u8 line_id,
				    u8 enable_mask,
				    u8 right_shift,
				    u8 force_valid_mask, u8 force_frame_mask)
{
	const struct dbg_block_chip *block =
		qed_get_dbg_block_per_chip(p_hwfn, block_id);

	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
	       line_id);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
	       enable_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
	       right_shift);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
	       force_valid_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
	       force_frame_mask);
}
1431 
/* Disable debug bus in all blocks: for every block that is present, out of
 * reset and has a debug bus, zero its dword-enable register. Blocks whose
 * debug bus is gated by a mode expression are only touched when the mode
 * matches the current configuration.
 */
static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id;

	/* Disable all blocks */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn,
					       (enum block_id)block_id);

		/* Skip removed blocks and blocks held in reset */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[block_id])
			continue;

		/* Disable debug bus */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
			u32 dbg_en_addr =
				block_per_chip->dbg_dword_enable_reg_addr;
			u16 modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			bool eval_mode =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;

			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_wr(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(dbg_en_addr),
				       0);
		}
	}
}
1470 
1471 /* Returns true if the specified entity (indicated by GRC param) should be
1472  * included in the dump, false otherwise.
1473  */
qed_grc_is_included(struct qed_hwfn * p_hwfn,enum dbg_grc_params grc_param)1474 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1475 				enum dbg_grc_params grc_param)
1476 {
1477 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1478 }
1479 
1480 /* Returns the storm_id that matches the specified Storm letter,
1481  * or MAX_DBG_STORMS if invalid storm letter.
1482  */
qed_get_id_from_letter(char storm_letter)1483 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1484 {
1485 	u8 storm_id;
1486 
1487 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1488 		if (s_storm_defs[storm_id].letter == storm_letter)
1489 			return (enum dbg_storms)storm_id;
1490 
1491 	return MAX_DBG_STORMS;
1492 }
1493 
/* Returns true if the specified Storm should be included in the dump, false
 * otherwise. Storm IDs map directly onto per-Storm GRC params, hence the
 * enum cast.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
1502 
/* Returns true if the specified memory should be included in the dump, false
 * otherwise.
 *
 * The decision is made in three stages:
 * 1. If the block is tied to a Storm, that Storm must be included.
 * 2. Big-RAM memory groups are governed by their own GRC param.
 * 3. Otherwise the memory group is mapped to its dump GRC param; unknown
 *    groups are included by default.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	/* Big-RAM groups have dedicated GRC params */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map the remaining memory groups to their dump GRC params */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		return true;
	}
}
1579 
/* Stalls (stall == true) or un-stalls (stall == false) all Storms that are
 * included in the dump, then sleeps STALL_DELAY_MS to let the stall take
 * effect.
 */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, bool stall)
{
	u32 reg_addr;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_id))
			continue;

		/* Per-Storm stall register lives in its SEM fast memory */
		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		    SEM_FAST_REG_STALL_0_BB_K2;
		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
	}

	msleep(STALL_DELAY_MS);
}
1599 
/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
 * taken out of reset.
 *
 * Non-RBC handling accumulates, per reset register, the bits of all blocks
 * flagged UNRESET_BEFORE_DUMP, then writes each register's unreset offset
 * once with the combined mask.
 */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			qed_wr(p_hwfn,
			       p_ptt,
			       s_rbc_reset_defs[i].reset_reg_addr +
			       RESET_REG_UNRESET_OFFSET,
			       s_rbc_reset_defs[i].reset_val[chip_id]);

	if (!rbc_only) {
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 block_id;

		/* Fill reset regs values */
		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
			bool is_removed, has_reset_reg, unreset_before_dump;
			const struct dbg_block_chip *block;

			block = qed_get_dbg_block_per_chip(p_hwfn,
							   (enum block_id)
							   block_id);
			is_removed =
			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
			has_reset_reg =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_HAS_RESET_REG);
			unreset_before_dump =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

			if (!is_removed && has_reset_reg && unreset_before_dump)
				reg_val[block->reset_reg_id] |=
				    BIT(block->reset_reg_bit_offset);
		}

		/* Write reset registers */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *reset_reg;
			u32 reset_reg_addr;

			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

			if (GET_FIELD
			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
				continue;

			if (reg_val[reset_reg_id]) {
				/* Address field is in dwords; the unreset
				 * offset selects the set-to-unreset register.
				 */
				reset_reg_addr =
				    GET_FIELD(reset_reg->data,
					      DBG_RESET_REG_ADDR);
				qed_wr(p_hwfn,
				       p_ptt,
				       DWORDS_TO_BYTES(reset_reg_addr) +
				       RESET_REG_UNRESET_OFFSET,
				       reg_val[reset_reg_id]);
			}
		}
	}
}
1671 
/* Returns the attention block data of the specified block, for the given
 * attention type (interrupt/parity), from the BIN_BUF_DBG_ATTN_BLOCKS
 * binary buffer.
 */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type)
{
	const struct dbg_attn_block *base_attn_block_arr =
	    (const struct dbg_attn_block *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
}
1683 
/* Returns the attention registers of the specified block for the given
 * attention type, and stores the register count in *num_attn_regs.
 * The returned pointer indexes into the BIN_BUF_DBG_ATTN_REGS binary
 * buffer at the block's per-type register offset.
 */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type,
			u8 *num_attn_regs)
{
	const struct dbg_attn_block_type_data *block_type_data =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);

	*num_attn_regs = block_type_data->num_regs;

	return (const struct dbg_attn_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
		block_type_data->regs_offset;
}
1699 
/* For each block (that is out of reset), clear the status of all parities
 * by reading each parity status-clear register. The read itself clears the
 * latched parity status; the value is intentionally discarded.
 */
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;
	u32 block_id;

	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If Mode match: clear parity status (read-to-clear) */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(reg_data->
						       sts_clr_address));
		}
	}
}
1740 
/* Dumps GRC registers section header. Returns the dumped size in dwords.
 * The following parameters are dumped:
 * - count: no. of dumped entries
 * - split_type: split type
 * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
 */
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 bool dump,
				 u32 num_reg_entries,
				 enum init_split_types split_type,
				 u8 split_id, const char *reg_type_name)
{
	/* "count" and "split" are always dumped; "id" and "type" only
	 * conditionally, so the section's param count varies.
	 */
	u8 num_params = 2 +
	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
	u32 offset = 0;

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "split",
				     s_split_type_defs[split_type].name);
	if (split_type != SPLIT_TYPE_NONE)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	if (reg_type_name)
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "type", reg_type_name);

	return offset;
}
1774 
1775 /* Reads the specified registers into the specified buffer.
1776  * The addr and len arguments are specified in dwords.
1777  */
qed_read_regs(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * buf,u32 addr,u32 len)1778 void qed_read_regs(struct qed_hwfn *p_hwfn,
1779 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1780 {
1781 	u32 i;
1782 
1783 	for (i = 0; i < len; i++)
1784 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1785 }
1786 
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 *
 * The range is read either via DMAE (preferred for long ranges and
 * wide-bus registers) or via direct GRC reads. For split dumps, the
 * appropriate port/PF/VF pretend is applied before GRC reads and cached in
 * dev_data->pretend to avoid redundant pretend writes. On a DMAE failure,
 * DMAE is disabled for the rest of the dump and GRC reads are used.
 */
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0;
	bool read_using_dmae = false;
	u32 thresh;
	u16 fid;

	/* In size-only mode just report the length */
	if (!dump)
		return len;

	/* Decode split_id into the relevant port/PF/VF identifiers */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		if (!read_using_dmae) {
			/* Fall back to GRC permanently for this dump */
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend only if it differs from the cached pretend state */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Print a progress log line each time the read count crosses a
	 * multiple of the per-platform threshold.
	 */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
1923 
1924 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1925  * The addr and len arguments are specified in dwords.
1926  */
qed_grc_dump_reg_entry_hdr(u32 * dump_buf,bool dump,u32 addr,u32 len)1927 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
1928 				      bool dump, u32 addr, u32 len)
1929 {
1930 	if (dump)
1931 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
1932 
1933 	return 1;
1934 }
1935 
1936 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1937  * The addr and len arguments are specified in dwords.
1938  */
qed_grc_dump_reg_entry(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,u32 addr,u32 len,bool wide_bus,enum init_split_types split_type,u8 split_id)1939 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
1940 				  struct qed_ptt *p_ptt,
1941 				  u32 *dump_buf,
1942 				  bool dump, u32 addr, u32 len, bool wide_bus,
1943 				  enum init_split_types split_type, u8 split_id)
1944 {
1945 	u32 offset = 0;
1946 
1947 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
1948 	offset += qed_grc_dump_addr_range(p_hwfn,
1949 					  p_ptt,
1950 					  dump_buf + offset,
1951 					  dump, addr, len, wide_bus,
1952 					  split_type, split_id);
1953 
1954 	return offset;
1955 }
1956 
1957 /* Dumps GRC registers sequence with skip cycle.
1958  * Returns the dumped size in dwords.
1959  * - addr:	start GRC address in dwords
1960  * - total_len:	total no. of dwords to dump
1961  * - read_len:	no. consecutive dwords to read
1962  * - skip_len:	no. of dwords to skip (and fill with zeros)
1963  */
qed_grc_dump_reg_entry_skip(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,u32 addr,u32 total_len,u32 read_len,u32 skip_len)1964 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
1965 				       struct qed_ptt *p_ptt,
1966 				       u32 *dump_buf,
1967 				       bool dump,
1968 				       u32 addr,
1969 				       u32 total_len,
1970 				       u32 read_len, u32 skip_len)
1971 {
1972 	u32 offset = 0, reg_offset = 0;
1973 
1974 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
1975 
1976 	if (!dump)
1977 		return offset + total_len;
1978 
1979 	while (reg_offset < total_len) {
1980 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
1981 
1982 		offset += qed_grc_dump_addr_range(p_hwfn,
1983 						  p_ptt,
1984 						  dump_buf + offset,
1985 						  dump,  addr, curr_len, false,
1986 						  SPLIT_TYPE_NONE, 0);
1987 		reg_offset += curr_len;
1988 		addr += curr_len;
1989 
1990 		if (reg_offset < total_len) {
1991 			curr_len = min_t(u32, skip_len, total_len - skip_len);
1992 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
1993 			offset += curr_len;
1994 			reg_offset += curr_len;
1995 			addr += curr_len;
1996 		}
1997 	}
1998 
1999 	return offset;
2000 }
2001 
/* Dumps GRC registers entries. Returns the dumped size in dwords.
 * The input array is a sequence of conditional headers, each followed by
 * cond_hdr->data_size dwords of register entries. An entry is dumped only if
 * its mode condition matches and its block is enabled; the number of entries
 * actually dumped is returned through num_dumped_reg_entries.
 */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* input_offset counts dwords within the input array.
	 * NOTE(review): the cast-then-add below relies on the cond header
	 * being a single dword - confirm against struct dbg_dump_cond_hdr.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole section if the mode doesn't match or the
		 * block is excluded from the dump.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Dump each register entry in this section */
		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			/* addr and len are encoded in dwords */
			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
2065 
2066 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
qed_grc_dump_split_data(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,struct virt_mem_desc input_regs_arr,u32 * dump_buf,bool dump,bool block_enable[MAX_BLOCK_ID],enum init_split_types split_type,u8 split_id,const char * reg_type_name)2067 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2068 				   struct qed_ptt *p_ptt,
2069 				   struct virt_mem_desc input_regs_arr,
2070 				   u32 *dump_buf,
2071 				   bool dump,
2072 				   bool block_enable[MAX_BLOCK_ID],
2073 				   enum init_split_types split_type,
2074 				   u8 split_id, const char *reg_type_name)
2075 {
2076 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2077 	enum init_split_types hdr_split_type = split_type;
2078 	u32 num_dumped_reg_entries, offset;
2079 	u8 hdr_split_id = split_id;
2080 
2081 	/* In PORT_PF split type, print a port split header */
2082 	if (split_type == SPLIT_TYPE_PORT_PF) {
2083 		hdr_split_type = SPLIT_TYPE_PORT;
2084 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2085 	}
2086 
2087 	/* Calculate register dump header size (and skip it for now) */
2088 	offset = qed_grc_dump_regs_hdr(dump_buf,
2089 				       false,
2090 				       0,
2091 				       hdr_split_type,
2092 				       hdr_split_id, reg_type_name);
2093 
2094 	/* Dump registers */
2095 	offset += qed_grc_dump_regs_entries(p_hwfn,
2096 					    p_ptt,
2097 					    input_regs_arr,
2098 					    dump_buf + offset,
2099 					    dump,
2100 					    split_type,
2101 					    split_id,
2102 					    block_enable,
2103 					    &num_dumped_reg_entries);
2104 
2105 	/* Write register dump header */
2106 	if (dump && num_dumped_reg_entries > 0)
2107 		qed_grc_dump_regs_hdr(dump_buf,
2108 				      dump,
2109 				      num_dumped_reg_entries,
2110 				      hdr_split_type,
2111 				      hdr_split_id, reg_type_name);
2112 
2113 	return num_dumped_reg_entries > 0 ? offset : 0;
2114 }
2115 
/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords.
 * The debug array is a sequence of split headers, each followed by
 * split_data_size dwords of register entries; every split section is dumped
 * once per split instance (port / PF / VF, as applicable).
 */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		/* NOTE(review): the cast-then-add relies on the split header
		 * being a single dword - confirm against
		 * struct dbg_dump_split_hdr.
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Number of split instances to iterate for this section */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			/* Unknown split type - abort the whole dump */
			return 0;
		}

		/* Dump the same register section once per split instance */
		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF) - the per-split dumps may
	 * have left a port/PF/VF pretend active.
	 */
	if (dump) {
		qed_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
2194 
2195 /* Dump reset registers. Returns the dumped size in dwords. */
qed_grc_dump_reset_regs(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump)2196 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2197 				   struct qed_ptt *p_ptt,
2198 				   u32 *dump_buf, bool dump)
2199 {
2200 	u32 offset = 0, num_regs = 0;
2201 	u8 reset_reg_id;
2202 
2203 	/* Calculate header size */
2204 	offset += qed_grc_dump_regs_hdr(dump_buf,
2205 					false,
2206 					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2207 
2208 	/* Write reset registers */
2209 	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2210 	     reset_reg_id++) {
2211 		const struct dbg_reset_reg *reset_reg;
2212 		u32 reset_reg_addr;
2213 
2214 		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2215 
2216 		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2217 			continue;
2218 
2219 		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2220 		offset += qed_grc_dump_reg_entry(p_hwfn,
2221 						 p_ptt,
2222 						 dump_buf + offset,
2223 						 dump,
2224 						 reset_reg_addr,
2225 						 1, false, SPLIT_TYPE_NONE, 0);
2226 		num_regs++;
2227 	}
2228 
2229 	/* Write header */
2230 	if (dump)
2231 		qed_grc_dump_regs_hdr(dump_buf,
2232 				      true, num_regs, SPLIT_TYPE_NONE,
2233 				      0, "RESET_REGS");
2234 
2235 	return offset;
2236 }
2237 
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 * Two sections are produced: attention (parity mask + status) registers and
 * Storm stall status registers. Each section header is written twice: once
 * as a placeholder and again once the entry count is known.
 */
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks held in reset can't be read (size-only pass still
		 * accounts for them).
		 */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;
			u32 addr;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode &&
			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: read & dump registers.
			 * Each attention register contributes two entries:
			 * the mask register and the status register.
			 */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;
		}
	}

	/* Overwrite header for attention registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
	     storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 addr;

		/* Skip Storms whose SEM block is in reset (when dumping) */
		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
			continue;

		addr =
		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				    SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 addr,
						 1,
						 false, SPLIT_TYPE_NONE, 0);
		num_reg_entries++;
	}

	/* Overwrite header for stall registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "REGS");

	return offset;
}
2348 
2349 /* Dumps registers that can't be represented in the debug arrays */
qed_grc_dump_special_regs(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump)2350 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2351 				     struct qed_ptt *p_ptt,
2352 				     u32 *dump_buf, bool dump)
2353 {
2354 	u32 offset = 0, addr;
2355 
2356 	offset += qed_grc_dump_regs_hdr(dump_buf,
2357 					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2358 
2359 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2360 	 * skipped).
2361 	 */
2362 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2363 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2364 					      p_ptt,
2365 					      dump_buf + offset,
2366 					      dump,
2367 					      addr,
2368 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2369 					      7,
2370 					      1);
2371 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2372 	offset +=
2373 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2374 					p_ptt,
2375 					dump_buf + offset,
2376 					dump,
2377 					addr,
2378 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2379 					7,
2380 					1);
2381 
2382 	return offset;
2383 }
2384 
2385 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2386  * dwords. The following parameters are dumped:
2387  * - name:	   dumped only if it's not NULL.
2388  * - addr:	   in dwords, dumped only if name is NULL.
2389  * - len:	   in dwords, always dumped.
2390  * - width:	   dumped if it's not zero.
2391  * - packed:	   dumped only if it's not false.
2392  * - mem_group:	   always dumped.
2393  * - is_storm:	   true only if the memory is related to a Storm.
2394  * - storm_letter: valid only if is_storm is true.
2395  *
2396  */
qed_grc_dump_mem_hdr(struct qed_hwfn * p_hwfn,u32 * dump_buf,bool dump,const char * name,u32 addr,u32 len,u32 bit_width,bool packed,const char * mem_group,char storm_letter)2397 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2398 				u32 *dump_buf,
2399 				bool dump,
2400 				const char *name,
2401 				u32 addr,
2402 				u32 len,
2403 				u32 bit_width,
2404 				bool packed,
2405 				const char *mem_group, char storm_letter)
2406 {
2407 	u8 num_params = 3;
2408 	u32 offset = 0;
2409 	char buf[64];
2410 
2411 	if (!len)
2412 		DP_NOTICE(p_hwfn,
2413 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2414 
2415 	if (bit_width)
2416 		num_params++;
2417 	if (packed)
2418 		num_params++;
2419 
2420 	/* Dump section header */
2421 	offset += qed_dump_section_hdr(dump_buf + offset,
2422 				       dump, "grc_mem", num_params);
2423 
2424 	if (name) {
2425 		/* Dump name */
2426 		if (storm_letter) {
2427 			strcpy(buf, "?STORM_");
2428 			buf[0] = storm_letter;
2429 			strcpy(buf + strlen(buf), name);
2430 		} else {
2431 			strcpy(buf, name);
2432 		}
2433 
2434 		offset += qed_dump_str_param(dump_buf + offset,
2435 					     dump, "name", buf);
2436 	} else {
2437 		/* Dump address */
2438 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2439 
2440 		offset += qed_dump_num_param(dump_buf + offset,
2441 					     dump, "addr", addr_in_bytes);
2442 	}
2443 
2444 	/* Dump len */
2445 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2446 
2447 	/* Dump bit width */
2448 	if (bit_width)
2449 		offset += qed_dump_num_param(dump_buf + offset,
2450 					     dump, "width", bit_width);
2451 
2452 	/* Dump packed */
2453 	if (packed)
2454 		offset += qed_dump_num_param(dump_buf + offset,
2455 					     dump, "packed", 1);
2456 
2457 	/* Dump reg type */
2458 	if (storm_letter) {
2459 		strcpy(buf, "?STORM_");
2460 		buf[0] = storm_letter;
2461 		strcpy(buf + strlen(buf), mem_group);
2462 	} else {
2463 		strcpy(buf, mem_group);
2464 	}
2465 
2466 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2467 
2468 	return offset;
2469 }
2470 
2471 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2472  * Returns the dumped size in dwords.
2473  * The addr and len arguments are specified in dwords.
2474  */
qed_grc_dump_mem(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,const char * name,u32 addr,u32 len,bool wide_bus,u32 bit_width,bool packed,const char * mem_group,char storm_letter)2475 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2476 			    struct qed_ptt *p_ptt,
2477 			    u32 *dump_buf,
2478 			    bool dump,
2479 			    const char *name,
2480 			    u32 addr,
2481 			    u32 len,
2482 			    bool wide_bus,
2483 			    u32 bit_width,
2484 			    bool packed,
2485 			    const char *mem_group, char storm_letter)
2486 {
2487 	u32 offset = 0;
2488 
2489 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2490 				       dump_buf + offset,
2491 				       dump,
2492 				       name,
2493 				       addr,
2494 				       len,
2495 				       bit_width,
2496 				       packed, mem_group, storm_letter);
2497 	offset += qed_grc_dump_addr_range(p_hwfn,
2498 					  p_ptt,
2499 					  dump_buf + offset,
2500 					  dump, addr, len, wide_bus,
2501 					  SPLIT_TYPE_NONE, 0);
2502 
2503 	return offset;
2504 }
2505 
/* Dumps GRC memories entries. Returns the dumped size in dwords.
 * The input array is a sequence of conditional headers, each followed by
 * fixed-size memory entries. A memory is dumped only if its mode condition
 * matches and its block/group is included in the dump.
 */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		/* NOTE(review): the cast-then-add relies on the cond header
		 * being a single dword - confirm against
		 * struct dbg_dump_cond_hdr.
		 */
		cond_hdr =
		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		    input_offset++;
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole section when the mode doesn't match */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
							  input_mems_arr.ptr
							  + input_offset);
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;
			bool mem_wide_bus;
			u8 mem_group_id;

			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
				return 0;
			}

			/* Honor the per-block / per-group dump filters */
			if (!qed_grc_is_mem_included(p_hwfn,
						     (enum block_id)
						     cond_hdr->block_id,
						     mem_group_id))
				continue;

			/* addr and len are encoded in dwords */
			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			/* Dump memory (stored by address: name is NULL) */
			offset += qed_grc_dump_mem(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						NULL,
						mem_addr,
						mem_len,
						mem_wide_bus,
						0,
						false,
						s_mem_group_names[mem_group_id],
						storm_letter);
		}
	}

	return offset;
}
2599 
/* Dumps GRC memories according to the input array dump_mem.
 * Returns the dumped size in dwords.
 * Iterates the split sections of the BIN_BUF_DBG_DUMP_MEM array; only
 * SPLIT_TYPE_NONE sections are supported for memories.
 */
static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_mems_arr;
		enum init_split_types split_type;
		u32 split_data_size;

		/* NOTE(review): the cast-then-add relies on the split header
		 * being a single dword - confirm against
		 * struct dbg_dump_split_hdr.
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		    input_offset++;
		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Only non-split memory sections are dumped; split sections
		 * are reported and skipped.
		 */
		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   p_ptt,
							   curr_input_mems_arr,
							   dump_buf + offset,
							   dump);
		else
			DP_NOTICE(p_hwfn,
				  "Dumping split memories is currently not supported\n");

		input_offset += split_data_size;
	}

	return offset;
}
2642 
/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 */
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump,
				 const char *name,
				 u32 num_lids,
				 enum cm_ctx_types ctx_type, u8 storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	/* A zero lid size means this context type doesn't exist for this
	 * chip/Storm - nothing to dump.
	 */
	if (!lid_size)
		return 0;

	total_size = num_lids * lid_size;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       0,
				       total_size,
				       lid_size * 32,
				       false, name, storm->letter);

	/* When not dumping, only report the size that would be written */
	if (!dump)
		return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data: for each lid, select each dword of the context
	 * via the CM context write-address register, then read it back one
	 * dword at a time.
	 * NOTE(review): the (i << 9) | lid encoding presumably matches the
	 * HW wr_addr register layout - confirm against the CM register spec.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			qed_wr(p_hwfn,
			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  rd_reg_addr,
							  1,
							  false,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
2700 
2701 /* Dumps GRC contexts. Returns the dumped size in dwords. */
qed_grc_dump_ctx(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump)2702 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2703 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2704 {
2705 	u32 offset = 0;
2706 	u8 storm_id;
2707 
2708 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2709 		if (!qed_grc_is_storm_included(p_hwfn,
2710 					       (enum dbg_storms)storm_id))
2711 			continue;
2712 
2713 		/* Dump Conn AG context size */
2714 		offset += qed_grc_dump_ctx_data(p_hwfn,
2715 						p_ptt,
2716 						dump_buf + offset,
2717 						dump,
2718 						"CONN_AG_CTX",
2719 						NUM_OF_LCIDS,
2720 						CM_CTX_CONN_AG, storm_id);
2721 
2722 		/* Dump Conn ST context size */
2723 		offset += qed_grc_dump_ctx_data(p_hwfn,
2724 						p_ptt,
2725 						dump_buf + offset,
2726 						dump,
2727 						"CONN_ST_CTX",
2728 						NUM_OF_LCIDS,
2729 						CM_CTX_CONN_ST, storm_id);
2730 
2731 		/* Dump Task AG context size */
2732 		offset += qed_grc_dump_ctx_data(p_hwfn,
2733 						p_ptt,
2734 						dump_buf + offset,
2735 						dump,
2736 						"TASK_AG_CTX",
2737 						NUM_OF_LTIDS,
2738 						CM_CTX_TASK_AG, storm_id);
2739 
2740 		/* Dump Task ST context size */
2741 		offset += qed_grc_dump_ctx_data(p_hwfn,
2742 						p_ptt,
2743 						dump_buf + offset,
2744 						dump,
2745 						"TASK_ST_CTX",
2746 						NUM_OF_LTIDS,
2747 						CM_CTX_TASK_ST, storm_id);
2748 	}
2749 
2750 	return offset;
2751 }
2752 
#define VFC_STATUS_RESP_READY_BIT	0
#define VFC_STATUS_BUSY_BIT		1
#define VFC_STATUS_SENDING_CMD_BIT	2

#define VFC_POLLING_DELAY_MS	1
#define VFC_POLLING_COUNT		20

/* Reads data from VFC. Returns the number of dwords read (0 on error).
 * Sizes are specified in dwords.
 * Writes the command and address through the Storm's SEM fast-memory
 * registers, then reads resp_size dwords back, polling the VFC status
 * register for readiness before each read.
 */
static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct storm_defs *storm,
				      u32 *cmd_data,
				      u32 cmd_size,
				      u32 *addr_data,
				      u32 addr_size,
				      u32 resp_size, u32 *dump_buf)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 vfc_status, polling_ms, polling_count = 0, i;
	u32 reg_addr, sem_base;
	bool is_ready = false;

	sem_base = storm->sem_fast_mem_addr;
	/* Scale the poll delay by the HW type's delay factor (e.g. for
	 * emulation platforms).
	 */
	polling_ms = VFC_POLLING_DELAY_MS *
	    s_hw_type_defs[dev_data->hw_type].delay_factor;

	/* Write VFC command */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
		   cmd_data, cmd_size);

	/* Write VFC address */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_ADDR,
		   addr_data, addr_size);

	/* Read response */
	for (i = 0; i < resp_size; i++) {
		/* Poll until ready; give up (return 0) after
		 * VFC_POLLING_COUNT attempts.
		 */
		do {
			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
			qed_grc_dump_addr_range(p_hwfn,
						p_ptt,
						&vfc_status,
						true,
						BYTES_TO_DWORDS(reg_addr),
						1,
						false, SPLIT_TYPE_NONE, 0);
			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);

			if (!is_ready) {
				if (polling_count++ == VFC_POLLING_COUNT)
					return 0;

				msleep(polling_ms);
			}
		} while (!is_ready);

		/* Read one response dword into the dump buffer */
		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
		qed_grc_dump_addr_range(p_hwfn,
					p_ptt,
					dump_buf + i,
					true,
					BYTES_TO_DWORDS(reg_addr),
					1, false, SPLIT_TYPE_NONE, 0);
	}

	return resp_size;
}
2826 
2827 /* Dump VFC CAM. Returns the dumped size in dwords. */
qed_grc_dump_vfc_cam(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,u8 storm_id)2828 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2829 				struct qed_ptt *p_ptt,
2830 				u32 *dump_buf, bool dump, u8 storm_id)
2831 {
2832 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2833 	struct storm_defs *storm = &s_storm_defs[storm_id];
2834 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2835 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2836 	u32 row, offset = 0;
2837 
2838 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2839 				       dump_buf + offset,
2840 				       dump,
2841 				       "vfc_cam",
2842 				       0,
2843 				       total_size,
2844 				       256,
2845 				       false, "vfc_cam", storm->letter);
2846 
2847 	if (!dump)
2848 		return offset + total_size;
2849 
2850 	/* Prepare CAM address */
2851 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2852 
2853 	/* Read VFC CAM data */
2854 	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2855 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2856 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
2857 						     p_ptt,
2858 						     storm,
2859 						     cam_cmd,
2860 						     VFC_CAM_CMD_DWORDS,
2861 						     cam_addr,
2862 						     VFC_CAM_ADDR_DWORDS,
2863 						     VFC_CAM_RESP_DWORDS,
2864 						     dump_buf + offset);
2865 	}
2866 
2867 	return offset;
2868 }
2869 
2870 /* Dump VFC RAM. Returns the dumped size in dwords. */
qed_grc_dump_vfc_ram(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,u8 storm_id,struct vfc_ram_defs * ram_defs)2871 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2872 				struct qed_ptt *p_ptt,
2873 				u32 *dump_buf,
2874 				bool dump,
2875 				u8 storm_id, struct vfc_ram_defs *ram_defs)
2876 {
2877 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2878 	struct storm_defs *storm = &s_storm_defs[storm_id];
2879 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2880 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2881 	u32 row, offset = 0;
2882 
2883 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2884 				       dump_buf + offset,
2885 				       dump,
2886 				       ram_defs->mem_name,
2887 				       0,
2888 				       total_size,
2889 				       256,
2890 				       false,
2891 				       ram_defs->type_name,
2892 				       storm->letter);
2893 
2894 	if (!dump)
2895 		return offset + total_size;
2896 
2897 	/* Prepare RAM address */
2898 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2899 
2900 	/* Read VFC RAM data */
2901 	for (row = ram_defs->base_row;
2902 	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
2903 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2904 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
2905 						     p_ptt,
2906 						     storm,
2907 						     ram_cmd,
2908 						     VFC_RAM_CMD_DWORDS,
2909 						     ram_addr,
2910 						     VFC_RAM_ADDR_DWORDS,
2911 						     VFC_RAM_RESP_DWORDS,
2912 						     dump_buf + offset);
2913 	}
2914 
2915 	return offset;
2916 }
2917 
2918 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
qed_grc_dump_vfc(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump)2919 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2920 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2921 {
2922 	u8 storm_id, i;
2923 	u32 offset = 0;
2924 
2925 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2926 		if (!qed_grc_is_storm_included(p_hwfn,
2927 					       (enum dbg_storms)storm_id) ||
2928 		    !s_storm_defs[storm_id].has_vfc)
2929 			continue;
2930 
2931 		/* Read CAM */
2932 		offset += qed_grc_dump_vfc_cam(p_hwfn,
2933 					       p_ptt,
2934 					       dump_buf + offset,
2935 					       dump, storm_id);
2936 
2937 		/* Read RAM */
2938 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2939 			offset += qed_grc_dump_vfc_ram(p_hwfn,
2940 						       p_ptt,
2941 						       dump_buf + offset,
2942 						       dump,
2943 						       storm_id,
2944 						       &s_vfc_ram_defs[i]);
2945 	}
2946 
2947 	return offset;
2948 }
2949 
/* Dumps GRC RSS data. Returns the dumped size in dwords. */
static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		u32 addr, num_dwords_to_read;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];

		/* Total dump size in dwords; entry_width is in bits */
		total_dwords = (num_entries * rss_defs->entry_width) / 32;

		/* 16-bit entries are marked as packed in the mem header */
		packed = (rss_defs->entry_width == 16);

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       rss_defs->mem_name,
					       0,
					       total_dwords,
					       rss_defs->entry_width,
					       packed,
					       rss_defs->type_name, 0);

		/* Dump RSS data */
		if (!dump) {
			/* Size-query mode: account for the data, skip reads */
			offset += total_dwords;
			continue;
		}

		/* Read the RAM through the address/data register window:
		 * write the row address to RSS_RAM_ADDR, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from RSS_RAM_DATA.
		 * The write-then-read order per iteration is required.
		 */
		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
		while (total_dwords) {
			num_dwords_to_read = min_t(u32,
						   RSS_REG_RSS_RAM_DATA_SIZE,
						   total_dwords);
			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  num_dwords_to_read,
							  false,
							  SPLIT_TYPE_NONE, 0);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
3007 
/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf, bool dump, u8 big_ram_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_size, ram_size, offset = 0, reg_val, i;
	char mem_name[12] = "???_BIG_RAM";
	char type_name[8] = "???_RAM";
	struct big_ram_defs *big_ram;

	big_ram = &s_big_ram_defs[big_ram_id];
	ram_size = big_ram->ram_size[dev_data->chip_id];

	/* A chip-specific register bit selects 256B vs 128B row blocks */
	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = reg_val &
		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
									 : 128;

	/* Overwrite only the "???" placeholder prefix with the instance name.
	 * strncpy is intentional here: it copies exactly BIG_RAM_NAME_LEN
	 * chars and must NOT NUL-terminate - the terminators come from the
	 * array initializers above.
	 * NOTE(review): assumes BIG_RAM_NAME_LEN equals the "???" prefix
	 * length (3) - confirm against the definition.
	 */
	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       mem_name,
				       0,
				       ram_size,
				       block_size * 8,
				       false, type_name, 0);

	/* Read and dump Big RAM data */
	if (!dump)
		return offset + ram_size;

	/* Dump Big RAM through the addr/data register window: write the
	 * window index, then read BRB_REG_BIG_RAM_DATA_SIZE dwords.
	 */
	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
	     i++) {
		u32 addr, len;

		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
		len = BRB_REG_BIG_RAM_DATA_SIZE;
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump,
						  addr,
						  len,
						  false, SPLIT_TYPE_NONE, 0);
	}

	return offset;
}
3063 
/* Dumps MCP scratchpad, cpu_reg_file and MCP registers.
 * The MCP is halted for the duration of the dump (unless NO_MCP is set)
 * and resumed afterwards. Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	u32 offset = 0, addr;
	bool halted = false;

	/* Halt MCP */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
				   MCP_REG_SCRATCH_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP cpu_reg_file */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
				   MCP_REG_CPU_REG_FILE_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP registers */
	block_enable[BLOCK_MCP] = true;
	offset += qed_grc_dump_registers(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump, block_enable, "MCP");

	/* Dump required non-MCP registers */
	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
					dump, 1, SPLIT_TYPE_NONE, 0,
					"MCP");
	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
	offset += qed_grc_dump_reg_entry(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump,
					 addr,
					 1,
					 false, SPLIT_TYPE_NONE, 0);

	/* Release MCP - only if the earlier halt succeeded */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	return offset;
}
3125 
/* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
	char mem_name[32];
	u8 phy_id;

	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
		struct phy_defs *phy_defs;
		u8 *bytes_buf;

		/* Compute the per-PHY tbus addr/data register addresses */
		phy_defs = &s_phy_defs[phy_id];
		addr_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_lo_addr;
		addr_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_hi_addr;
		data_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_lo_addr;
		data_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_hi_addr;

		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
			     phy_defs->phy_name) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid PHY memory name\n");

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       mem_name,
					       0,
					       PHY_DUMP_SIZE_DWORDS,
					       16, true, mem_name, 0);

		if (!dump) {
			/* Size-query mode: account for the data, skip reads */
			offset += PHY_DUMP_SIZE_DWORDS;
			continue;
		}

		/* Fill the dump buffer byte-wise: for each tbus address
		 * (hi:lo), program the address registers, then read the low
		 * and high data bytes. Write order (hi, then lo, then reads)
		 * follows the indirect-access protocol and must be kept.
		 */
		bytes_buf = (u8 *)(dump_buf + offset);
		for (tbus_hi_offset = 0;
		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
		     tbus_hi_offset++) {
			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
			     tbus_lo_offset++) {
				qed_wr(p_hwfn,
				       p_ptt, addr_lo_addr, tbus_lo_offset);
				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
							    p_ptt,
							    data_lo_addr);
				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
							    p_ptt,
							    data_hi_addr);
			}
		}

		offset += PHY_DUMP_SIZE_DWORDS;
	}

	return offset;
}
3192 
3193 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3194 					    struct qed_ptt *p_ptt,
3195 					    u32 image_type,
3196 					    u32 *nvram_offset_bytes,
3197 					    u32 *nvram_size_bytes);
3198 
3199 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3200 				      struct qed_ptt *p_ptt,
3201 				      u32 nvram_offset_bytes,
3202 				      u32 nvram_size_bytes, u32 *ret_buf);
3203 
3204 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
qed_grc_dump_mcp_hw_dump(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump)3205 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3206 				    struct qed_ptt *p_ptt,
3207 				    u32 *dump_buf, bool dump)
3208 {
3209 	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3210 	u32 hw_dump_size_dwords = 0, offset = 0;
3211 	enum dbg_status status;
3212 
3213 	/* Read HW dump image from NVRAM */
3214 	status = qed_find_nvram_image(p_hwfn,
3215 				      p_ptt,
3216 				      NVM_TYPE_HW_DUMP_OUT,
3217 				      &hw_dump_offset_bytes,
3218 				      &hw_dump_size_bytes);
3219 	if (status != DBG_STATUS_OK)
3220 		return 0;
3221 
3222 	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3223 
3224 	/* Dump HW dump image section */
3225 	offset += qed_dump_section_hdr(dump_buf + offset,
3226 				       dump, "mcp_hw_dump", 1);
3227 	offset += qed_dump_num_param(dump_buf + offset,
3228 				     dump, "size", hw_dump_size_dwords);
3229 
3230 	/* Read MCP HW dump image into dump buffer */
3231 	if (dump && hw_dump_size_dwords) {
3232 		status = qed_nvram_read(p_hwfn,
3233 					p_ptt,
3234 					hw_dump_offset_bytes,
3235 					hw_dump_size_bytes, dump_buf + offset);
3236 		if (status != DBG_STATUS_OK) {
3237 			DP_NOTICE(p_hwfn,
3238 				  "Failed to read MCP HW Dump image from NVRAM\n");
3239 			return 0;
3240 		}
3241 	}
3242 	offset += hw_dump_size_dwords;
3243 
3244 	return offset;
3245 }
3246 
/* Dumps Static Debug data. Returns the dumped size in dwords. */
static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0, addr, len;

	/* Don't dump static debug if a debug bus recording is in progress */
	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable debug bus in all blocks */
		qed_bus_disable_blocks(p_hwfn, p_ptt);

		/* Reset and configure the DBG block for static-line capture:
		 * 8-HW-dword framing, internal-buffer target, full mode.
		 */
		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip;
		const struct dbg_block *block;
		bool is_removed, has_dbg_bus;
		u16 modes_buf_offset;
		u32 block_dwords;

		block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
		is_removed = GET_FIELD(block_per_chip->flags,
				       DBG_BLOCK_CHIP_IS_REMOVED);
		has_dbg_bus = GET_FIELD(block_per_chip->flags,
					DBG_BLOCK_CHIP_HAS_DBG_BUS);

		/* read+clear for NWS parity is not working, skip NWS block */
		if (block_id == BLOCK_NWS)
			continue;

		/* If the block's debug bus is mode-dependent, verify the
		 * current mode matches; otherwise treat it as having no bus.
		 */
		if (!is_removed && has_dbg_bus &&
		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
			      DBG_MODE_HDR_EVAL_MODE) > 0) {
			modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
				has_dbg_bus = false;
		}

		if (is_removed || !has_dbg_bus)
			continue;

		block_dwords = NUM_DBG_LINES(block_per_chip) *
			       STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       block->name,
					       0,
					       block_dwords,
					       32, false, "STATIC", 0);

		if (!dump) {
			/* Size-query mode: account for the data, skip reads */
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			memset(dump_buf + offset, 0,
			       DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		qed_bus_enable_clients(p_hwfn,
				       p_ptt,
				       BIT(block_per_chip->dbg_client_id));

		/* For each debug line: select it, then read its dwords from
		 * the calendar-out data register.
		 */
		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
		len = STATIC_DEBUG_LINE_DWORDS;
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
		     line_id++) {
			/* Configure debug line ID */
			qed_bus_config_dbg_line(p_hwfn,
						p_ptt,
						(enum block_id)block_id,
						(u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  len,
							  true, SPLIT_TYPE_NONE,
							  0);
		}

		/* Disable block's client and debug output */
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
		qed_bus_config_dbg_line(p_hwfn, p_ptt,
					(enum block_id)block_id, 0, 0, 0, 0, 0);
	}

	if (dump) {
		/* Restore: disable the DBG block and all clients */
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3369 
3370 /* Performs GRC Dump to the specified buffer.
3371  * Returns the dumped size in dwords.
3372  */
qed_grc_dump(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * dump_buf,bool dump,u32 * num_dumped_dwords)3373 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3374 				    struct qed_ptt *p_ptt,
3375 				    u32 *dump_buf,
3376 				    bool dump, u32 *num_dumped_dwords)
3377 {
3378 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3379 	u32 dwords_read, offset = 0;
3380 	bool parities_masked = false;
3381 	u8 i;
3382 
3383 	*num_dumped_dwords = 0;
3384 	dev_data->num_regs_read = 0;
3385 
3386 	/* Update reset state */
3387 	if (dump)
3388 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3389 
3390 	/* Dump global params */
3391 	offset += qed_dump_common_global_params(p_hwfn,
3392 						p_ptt,
3393 						dump_buf + offset, dump, 4);
3394 	offset += qed_dump_str_param(dump_buf + offset,
3395 				     dump, "dump-type", "grc-dump");
3396 	offset += qed_dump_num_param(dump_buf + offset,
3397 				     dump,
3398 				     "num-lcids",
3399 				     NUM_OF_LCIDS);
3400 	offset += qed_dump_num_param(dump_buf + offset,
3401 				     dump,
3402 				     "num-ltids",
3403 				     NUM_OF_LTIDS);
3404 	offset += qed_dump_num_param(dump_buf + offset,
3405 				     dump, "num-ports", dev_data->num_ports);
3406 
3407 	/* Dump reset registers (dumped before taking blocks out of reset ) */
3408 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3409 		offset += qed_grc_dump_reset_regs(p_hwfn,
3410 						  p_ptt,
3411 						  dump_buf + offset, dump);
3412 
3413 	/* Take all blocks out of reset (using reset registers) */
3414 	if (dump) {
3415 		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3416 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3417 	}
3418 
3419 	/* Disable all parities using MFW command */
3420 	if (dump &&
3421 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3422 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3423 		if (!parities_masked) {
3424 			DP_NOTICE(p_hwfn,
3425 				  "Failed to mask parities using MFW\n");
3426 			if (qed_grc_get_param
3427 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3428 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3429 		}
3430 	}
3431 
3432 	/* Dump modified registers (dumped before modifying them) */
3433 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3434 		offset += qed_grc_dump_modified_regs(p_hwfn,
3435 						     p_ptt,
3436 						     dump_buf + offset, dump);
3437 
3438 	/* Stall storms */
3439 	if (dump &&
3440 	    (qed_grc_is_included(p_hwfn,
3441 				 DBG_GRC_PARAM_DUMP_IOR) ||
3442 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3443 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3444 
3445 	/* Dump all regs  */
3446 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3447 		bool block_enable[MAX_BLOCK_ID];
3448 
3449 		/* Dump all blocks except MCP */
3450 		for (i = 0; i < MAX_BLOCK_ID; i++)
3451 			block_enable[i] = true;
3452 		block_enable[BLOCK_MCP] = false;
3453 		offset += qed_grc_dump_registers(p_hwfn,
3454 						 p_ptt,
3455 						 dump_buf +
3456 						 offset,
3457 						 dump,
3458 						 block_enable, NULL);
3459 
3460 		/* Dump special registers */
3461 		offset += qed_grc_dump_special_regs(p_hwfn,
3462 						    p_ptt,
3463 						    dump_buf + offset, dump);
3464 	}
3465 
3466 	/* Dump memories */
3467 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3468 
3469 	/* Dump MCP */
3470 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3471 		offset += qed_grc_dump_mcp(p_hwfn,
3472 					   p_ptt, dump_buf + offset, dump);
3473 
3474 	/* Dump context */
3475 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3476 		offset += qed_grc_dump_ctx(p_hwfn,
3477 					   p_ptt, dump_buf + offset, dump);
3478 
3479 	/* Dump RSS memories */
3480 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3481 		offset += qed_grc_dump_rss(p_hwfn,
3482 					   p_ptt, dump_buf + offset, dump);
3483 
3484 	/* Dump Big RAM */
3485 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3486 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3487 			offset += qed_grc_dump_big_ram(p_hwfn,
3488 						       p_ptt,
3489 						       dump_buf + offset,
3490 						       dump, i);
3491 
3492 	/* Dump VFC */
3493 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3494 		dwords_read = qed_grc_dump_vfc(p_hwfn,
3495 					       p_ptt, dump_buf + offset, dump);
3496 		offset += dwords_read;
3497 		if (!dwords_read)
3498 			return DBG_STATUS_VFC_READ_ERROR;
3499 	}
3500 
3501 	/* Dump PHY tbus */
3502 	if (qed_grc_is_included(p_hwfn,
3503 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3504 	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3505 		offset += qed_grc_dump_phy(p_hwfn,
3506 					   p_ptt, dump_buf + offset, dump);
3507 
3508 	/* Dump MCP HW Dump */
3509 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3510 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3511 		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3512 						   p_ptt,
3513 						   dump_buf + offset, dump);
3514 
3515 	/* Dump static debug data (only if not during debug bus recording) */
3516 	if (qed_grc_is_included(p_hwfn,
3517 				DBG_GRC_PARAM_DUMP_STATIC) &&
3518 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3519 		offset += qed_grc_dump_static_debug(p_hwfn,
3520 						    p_ptt,
3521 						    dump_buf + offset, dump);
3522 
3523 	/* Dump last section */
3524 	offset += qed_dump_last_section(dump_buf, offset, dump);
3525 
3526 	if (dump) {
3527 		/* Unstall storms */
3528 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3529 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3530 
3531 		/* Clear parity status */
3532 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3533 
3534 		/* Enable all parities using MFW command */
3535 		if (parities_masked)
3536 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3537 	}
3538 
3539 	*num_dumped_dwords = offset;
3540 
3541 	return DBG_STATUS_OK;
3542 }
3543 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Emits a result header, the values of all condition registers for the
 * failing entry, and any matching info registers.
 * Returns the dumped size in dwords.
 */
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *
				     dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	/* The rule's register array holds cond regs first, then info regs */
	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
	regs = (const union dbg_idle_chk_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
		rule->reg_offset;
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		memset(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr =
		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			/* Size-query mode: account for header + entry */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		memset(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A reg is a memory if it has multiple entries or a base */
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values (already read by the caller into
		 * cond_reg_values, laid out consecutively per register).
		 */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, "Invalid block_id\n");
			return 0;
		}

		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
				  (dump_buf + offset);

			/* Check mode - skip info regs whose mode condition
			 * does not match the current configuration.
			 */
			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset =
				    GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match =
					qed_is_mode_match(p_hwfn,
							  &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data,
					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data,
					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			memset(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info reg IDs follow the condition reg IDs */
			SET_FIELD(reg_hdr->data,
				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
				  rule->num_cond_regs + reg_id);

			/* Write register values (read directly from GRC) */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  reg->size, wide_bus,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
3680 
/* Dumps idle check rule entries. Returns the dumped size in dwords. */
static u32
qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 *dump_buf, bool dump,
			       const struct dbg_idle_chk_rule *input_rules,
			       u32 num_input_rules, u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		/* Resolve the rule's condition registers and immediates
		 * from the debug binary arrays.
		 */
		rule = &input_rules[i];
		regs = (const union dbg_idle_chk_reg *)
			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
			rule->reg_offset;
		cond_regs = &regs[0].cond_reg;
		imm_values =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
		    rule->imm_offset;

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
		     reg_id++) {
			u32 block_id =
				GET_FIELD(cond_regs[reg_id].data,
					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* Skip rules whose blocks are in reset (only when actually
		 * dumping; in size-query mode worst case is assumed below).
		 */
		if (!check_rule && dump)
			continue;

		if (!dump) {
			/* Size-query mode: assume every entry of every rule
			 * fails and reserve space accordingly (worst case).
			 */
			u32 entry_dump_size =
				qed_idle_chk_dump_failure(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  false,
							  rule->rule_id,
							  rule,
							  0,
							  NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same
		 * for all condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs;
			     reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg =
					&cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the
				 * address of the specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data,
						 DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus =
				    GET_FIELD(reg->data,
					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 ||
				    reg->start_entry > 0) {
					/* Entries are padded to a power of
					 * two in the memory layout.
					 */
					padded_entry_size =
					   reg->entry_size > 1 ?
					   roundup_pow_of_two(reg->entry_size) :
					   1;
					addr += (reg->start_entry + entry_id) *
						padded_entry_size;
				}

				/* Read registers - guard against overflowing
				 * the on-stack cond_reg_values array.
				 */
				if (next_reg_offset + reg->entry_size >=
				    IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn,
						  "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset +=
				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
							    cond_reg_values +
							    next_reg_offset,
							    dump, addr,
							    reg->entry_size,
							    wide_bus,
							    SPLIT_TYPE_NONE, 0);
			}

			/* Call rule condition function.
			 * If returns true, it's a failure.
			 */
			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
							imm_values)) {
				offset += qed_idle_chk_dump_failure(p_hwfn,
							p_ptt,
							dump_buf + offset,
							dump,
							rule->rule_id,
							rule,
							entry_id,
							cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
3822 
3823 /* Performs Idle Check Dump to the specified buffer.
3824  * Returns the dumped size in dwords.
3825  */
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	/* Binary idle-check rules image, loaded into the per-hwfn debug
	 * arrays. It is a sequence of condition headers, each followed by
	 * its rule entries.
	 */
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
	u32 num_failing_rules_offset, offset = 0,
	    input_offset = 0, num_failing_rules = 0;

	/* Dump global params  - 1 must match below amount of params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);

	/* Remember where the "num_rules" parameter lives so it can be
	 * overwritten with the real count after all rules are processed.
	 */
	num_failing_rules_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the rules image; input_offset counts dwords consumed */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		/* NOTE(review): pointer arithmetic here advances in units of
		 * sizeof(struct dbg_idle_chk_cond_hdr); the dword accounting
		 * in input_offset assumes that header is exactly one dword —
		 * confirm against the HSI definition.
		 */
		const struct dbg_idle_chk_cond_hdr *cond_hdr =
		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
		    input_offset++;
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode: rules may be restricted to specific chip/
		 * configuration modes; skip the group if the mode mismatches.
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (mode_match) {
			/* Rules for this header start right after it */
			const struct dbg_idle_chk_rule *rule =
			    (const struct dbg_idle_chk_rule *)((u32 *)
							       dbg_buf->ptr
							       + input_offset);
			u32 num_input_rules =
				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
			offset +=
			    qed_idle_chk_dump_rule_entries(p_hwfn,
							   p_ptt,
							   dump_buf +
							   offset,
							   dump,
							   rule,
							   num_input_rules,
							   &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		/* Skip this header's payload whether or not it was dumped */
		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter with the actual failing-rule count */
	if (dump)
		qed_dump_num_param(dump_buf + num_failing_rules_offset,
				   dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
3897 
3898 /* Finds the meta data image in NVRAM */
/* Locates an image of the given type in NVRAM via the MFW
 * DRV_MSG_CODE_NVM_GET_FILE_ATT mailbox command.
 * On success, the image offset and size (both in bytes) are returned
 * through the output parameters; the size must be dword-aligned.
 */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
					    struct qed_ptt *p_ptt,
					    u32 image_type,
					    u32 *nvram_offset_bytes,
					    u32 *nvram_size_bytes)
{
	u32 mcp_resp, mcp_param, txn_size;
	struct mcp_file_att file_att;
	int rc;

	/* Ask the MFW for the attributes of the requested file/image */
	rc = qed_mcp_nvm_rd_cmd(p_hwfn,
				p_ptt,
				DRV_MSG_CODE_NVM_GET_FILE_ATT,
				image_type,
				&mcp_resp,
				&mcp_param, &txn_size, (u32 *)&file_att);

	/* Both the command itself and the MFW response must succeed */
	if (rc || (mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Report the image location/size to the caller */
	*nvram_offset_bytes = file_att.nvm_start_addr;
	*nvram_size_bytes = file_att.len;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
		   image_type, *nvram_offset_bytes, *nvram_size_bytes);

	/* The image is read dword-by-dword, so its size must be aligned */
	return (*nvram_size_bytes & 0x3) ?
	    DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE : DBG_STATUS_OK;
}
3938 
3939 /* Reads data from NVRAM */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
	/* Signed so the loop terminates even if the MFW returns more bytes
	 * than requested on the final chunk.
	 */
	s32 bytes_left = nvram_size_bytes;
	u32 read_offset = 0, param = 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "nvram_read: reading image of size %d bytes from NVRAM\n",
		   nvram_size_bytes);

	/* Read in chunks of at most MCP_DRV_NVM_BUF_LEN bytes (the MFW
	 * mailbox buffer limit) until the whole image is copied.
	 */
	do {
		bytes_to_copy =
		    (bytes_left >
		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

		/* Call NVRAM read command; offset and length are packed
		 * into a single mailbox parameter.
		 */
		SET_MFW_FIELD(param,
			      DRV_MB_PARAM_NVM_OFFSET,
			      nvram_offset_bytes + read_offset);
		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
				       &ret_mcp_resp,
				       &ret_mcp_param, &ret_read_size,
				       (u32 *)((u8 *)ret_buf + read_offset)))
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Check response */
		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Update read offset by the number of bytes the MFW
		 * actually returned (may differ from bytes_to_copy).
		 */
		read_offset += ret_read_size;
		bytes_left -= ret_read_size;
	} while (bytes_left > 0);

	return DBG_STATUS_OK;
}
3982 
3983 /* Get info on the MCP Trace data in the scratchpad:
3984  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
3985  * - trace_data_size (OUT): trace data size in bytes (without the header)
3986  */
/* Get info on the MCP Trace data in the scratchpad:
 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
 * - trace_data_size (OUT): trace data size in bytes (without the header)
 */
static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *trace_data_grc_addr,
						   u32 *trace_data_size)
{
	u32 trace_offsize, sig;

	/* The scratchpad holds an offsize word locating the trace section */
	trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);

	/* Convert the offsize into a GRC byte address of the trace data */
	*trace_data_grc_addr =
		MCP_REG_SCRATCH + SECTION_OFFSET(trace_offsize);

	/* The section must begin with the MFW trace signature */
	sig = qed_rd(p_hwfn, p_ptt,
		     *trace_data_grc_addr +
		     offsetof(struct mcp_trace, signature));
	if (sig != MFW_TRACE_SIGNATURE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Trace data size in bytes, excluding the mcp_trace header */
	*trace_data_size = qed_rd(p_hwfn,
				  p_ptt,
				  *trace_data_grc_addr +
				  offsetof(struct mcp_trace, size));

	return DBG_STATUS_OK;
}
4017 
4018 /* Reads MCP trace meta data image from NVRAM
4019  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4020  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4021  *			      loaded from file).
4022  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4023  */
static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 trace_data_size_bytes,
						   u32 *running_bundle_id,
						   u32 *trace_meta_offset,
						   u32 *trace_meta_size)
{
	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;

	/* Read MCP trace section offsize structure from MCP scratchpad */
	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);

	/* Find running bundle ID: it is stored in the dword right after the
	 * trace section (section start + section size + trace data size).
	 */
	running_mfw_addr =
		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);

	/* Only bundle IDs 0 and 1 are valid */
	if (*running_bundle_id > 1)
		return DBG_STATUS_INVALID_NVRAM_BUNDLE;

	/* Find image in NVRAM: each bundle has its own trace-meta image */
	nvram_image_type =
	    (*running_bundle_id ==
	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
	return qed_find_nvram_image(p_hwfn,
				    p_ptt,
				    nvram_image_type,
				    trace_meta_offset, trace_meta_size);
}
4053 
4054 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
/* Reads the MCP Trace meta data from NVRAM into the specified buffer.
 *
 * The meta image layout (as parsed below) is:
 *   u32 signature (NVM_MAGIC_VALUE)
 *   u8  modules_num
 *   modules_num x { u8 module_len; u8 module_name[module_len]; }
 *   u32 signature (NVM_MAGIC_VALUE)
 *
 * The walk is bounds-checked against size_in_bytes so that corrupt or
 * truncated NVRAM contents cannot cause reads past the end of the
 * caller-supplied buffer; any inconsistency is reported as an invalid
 * trace signature.
 */
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
					       struct qed_ptt *p_ptt,
					       u32 nvram_offset_in_bytes,
					       u32 size_in_bytes, u32 *buf)
{
	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
	u8 *buf_end = (u8 *)buf + size_in_bytes;
	enum dbg_status status;
	u32 signature;

	/* Read meta data from NVRAM */
	status = qed_nvram_read(p_hwfn,
				p_ptt,
				nvram_offset_in_bytes, size_in_bytes, buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* Extract and check first signature (plus the modules_num byte) */
	if ((u32)(buf_end - byte_buf) < sizeof(signature) + 1)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Extract number of modules */
	modules_num = *(byte_buf++);

	/* Skip all modules, verifying each length byte and name fit in
	 * the buffer.
	 */
	for (i = 0; i < modules_num; i++) {
		if (byte_buf >= buf_end)
			return DBG_STATUS_INVALID_TRACE_SIGNATURE;
		module_len = *(byte_buf++);
		if (module_len > buf_end - byte_buf)
			return DBG_STATUS_INVALID_TRACE_SIGNATURE;
		byte_buf += module_len;
	}

	/* Extract and check second signature */
	if ((u32)(buf_end - byte_buf) < sizeof(signature))
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	return DBG_STATUS_OK;
}
4094 
4095 /* Dump MCP Trace */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;
	int halted = 0;
	bool use_mfw;

	*num_dumped_dwords = 0;

	/* MCP (MFW) interaction is skipped when the NO_MCP debug param is
	 * set; in that case only the scratchpad trace data is dumped.
	 */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && use_mfw) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Find trace data size in dwords (data plus the mcp_trace header) */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			 BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	 */
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     p_ptt,
						     trace_data_size_bytes,
						     &running_bundle_id,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	/* status still holds DBG_STATUS_OK from the data-info stage when the
	 * meta-info lookup above was skipped.
	 */
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param (0 if the meta lookup failed) */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4205 
4206 /* Dump GRC FIFO */
static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 dwords_read, size_param_offset, offset = 0, addr, len;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "reg-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 * So the size-estimation pass reserves the full depth.
		 */
		offset += REG_FIFO_DEPTH_DWORDS;
		goto out;
	}

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we are
	 * emptying it.
	 */
	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
	len = REG_FIFO_ELEMENT_DWORDS;
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
		/* Each iteration drains exactly one FIFO element */
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  true,
						  addr,
						  len,
						  true, SPLIT_TYPE_NONE,
						  0);
		/* Re-check the valid flag after each element read */
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
	}

	/* Overwrite the size param with the number of dwords actually read */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4275 
4276 /* Dump IGU FIFO */
static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump, u32 *num_dumped_dwords)
{
	u32 dwords_read, size_param_offset, offset = 0, addr, len;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "igu-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "igu_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 * So the size-estimation pass reserves the full depth.
		 */
		offset += IGU_FIFO_DEPTH_DWORDS;
		goto out;
	}

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we are
	 * emptying it.
	 */
	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
	len = IGU_FIFO_ELEMENT_DWORDS;
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
		/* Each iteration drains exactly one FIFO element */
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  true,
						  addr,
						  len,
						  true, SPLIT_TYPE_NONE,
						  0);
		/* Re-check the valid flag after each element read */
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
	}

	/* Overwrite the size param with the number of dwords actually read */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
			   dwords_read);
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4345 
4346 /* Protection Override dump */
/* Protection Override dump.
 * Dumps the valid protection-override windows from GRC into the buffer.
 * The size-estimation pass (!dump) reserves PROTECTION_OVERRIDE_DEPTH_DWORDS,
 * so the actual read is clamped to that depth as well.
 */
static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *dump_buf,
						    bool dump,
						    u32 *num_dumped_dwords)
{
	u32 size_param_offset, override_window_dwords, offset = 0, addr;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "protection-override");

	/* Dump data section header and param. The size param is 0 for now,
	 * and is overwritten after reading the data.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "protection_override_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (!dump) {
		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
		goto out;
	}

	/* Add override window info to buffer. Clamp the window count read
	 * from HW to the override-memory depth: the dump buffer was sized
	 * to PROTECTION_OVERRIDE_DEPTH_DWORDS in the !dump pass, so a bogus
	 * register value must not be allowed to overflow it.
	 */
	override_window_dwords =
		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
	if (override_window_dwords > PROTECTION_OVERRIDE_DEPTH_DWORDS)
		override_window_dwords = PROTECTION_OVERRIDE_DEPTH_DWORDS;
	if (override_window_dwords) {
		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  true,
						  addr,
						  override_window_dwords,
						  true, SPLIT_TYPE_NONE, 0);
		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
				   override_window_dwords);
	}
out:
	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4401 
4402 /* Performs FW Asserts Dump to the specified buffer.
4403  * Returns the dumped size in dwords.
4404  */
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	/* Single-character storm name, rendered as a string parameter */
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Dump one "fw_asserts" section per storm */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* A storm whose SEM block is in reset cannot be read */
		if (dev_data->block_in_reset[storm->sem_block_id])
			continue;

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     dump,
					     "size",
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data; the size-estimation pass
		 * only reserves one list element per storm.
		 */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Compute the byte address of the asserts section inside the
		 * storm's fast-semaphore INT RAM.
		 */
		addr = le16_to_cpu(asserts->section_ram_line_offset);
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
					  SEM_FAST_REG_INT_RAM +
					  RAM_LINES_TO_BYTES(addr);

		/* The list is circular; next_list_idx points at the slot the
		 * FW would write next, so the most recent entry is the one
		 * before it (wrapping to the last element when it is 0).
		 */
		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0 ?
				 next_list_idx :
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
		offset +=
		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
					    dump_buf + offset,
					    dump, addr,
					    asserts->list_element_dword_size,
						  false, SPLIT_TYPE_NONE, 0);
	}

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4480 
4481 /* Dumps the specified ILT pages to the specified buffer.
4482  * Returns the dumped size in dwords.
4483  */
/* Dumps the specified range of ILT shadow pages to the specified buffer.
 * When dump_page_ids is set, one dword (the page ID) is emitted per page;
 * otherwise the full page contents are copied. Pages without a mapped
 * virtual address are skipped.
 * Returns the dumped size in dwords.
 */
static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
				    bool dump,
				    u32 start_page_id,
				    u32 num_pages,
				    struct phys_mem_desc *ilt_pages,
				    bool dump_page_ids)
{
	u32 page_id, last_page_id, offset = 0;

	if (!num_pages)
		return 0;

	last_page_id = start_page_id + num_pages - 1;

	for (page_id = start_page_id; page_id <= last_page_id; page_id++) {
		struct phys_mem_desc *mem_desc = &ilt_pages[page_id];

		/* Skip shadow entries with no backing memory */
		if (!mem_desc->virt_addr)
			continue;

		if (dump_page_ids) {
			/* Emit the page ID only */
			if (dump)
				dump_buf[offset] = page_id;
			offset++;
		} else {
			/* Emit the page contents */
			if (dump)
				memcpy(dump_buf + offset,
				       mem_desc->virt_addr, mem_desc->size);
			offset += BYTES_TO_DWORDS(mem_desc->size);
		}
	}

	return offset;
}
4526 
4527 /* Dumps a section containing the dumped ILT pages.
4528  * Returns the dumped size in dwords.
4529  */
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
				      u32 *dump_buf,
				      bool dump,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
				      bool dump_page_ids)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = 0;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 cdut_pf_pages, cdut_vf_pages;
	const char *section_name;
	u8 i;

	/* The same routine produces either the page-ID section or the
	 * page-memory section, selected by dump_page_ids.
	 */
	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	/* ILT line numbers are global; subtracting pf_start_line converts
	 * them to indices into this PF's shadow array.
	 */
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;

	offset +=
	    qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   valid_conn_pf_pages,
						   ilt_pages, dump_page_ids);

		/* Dump connection VF pages: each VF owns vf_total_lines
		 * pages, laid out consecutively after the PF pages.
		 */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   valid_conn_vf_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - PF work pages
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	 *   - VF work pages
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages (skipping the init pages) */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   cdut_pf_work_pages,
						   ilt_pages, dump_page_ids);

		/* Dump task VF pages: skip past all PF pages plus each VF's
		 * init pages; advance by a full VF stride per iteration.
		 */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   cdut_vf_work_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* Overwrite size param with the actual dumped data size */
	if (dump)
		qed_dump_num_param(dump_buf + size_param_offset,
				   dump, "size", offset - base_data_offset);

	return offset;
}
4629 
4630 /* Performs ILT Dump to the specified buffer.
4631  * Returns the dumped size in dwords.
4632  */
static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
	u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	struct phys_mem_desc *ilt_pages;
	u8 conn_type;

	/* ILT page sizes are encoded as a power-of-two exponent above the
	 * minimum supported page size.
	 */
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;

	/* Dump global params - 22 must match number of params below */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 22);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-page-size", cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-pf-pages",
				     clients
				     [ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-vf-pages",
				     clients
				     [ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-conn-ctx-size",
				     conn_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-page-size", cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-init-pages",
				     qed_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-init-pages",
				     qed_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-work-pages",
				     qed_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-work-pages",
				     qed_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "task-type-id",
				     p_hwfn->p_cxt_mngr->task_type_id);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
					      dump,
					      "num-vfs-in-pf",
					      p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ptr-size-bytes", sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "pf-start-line",
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ilt-shadow-size",
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);
	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()
	 */

	/* Dump section containing number of PF CIDs per connection type.
	 * Each loop iteration writes one raw dword (hence offset++), while
	 * the running total feeds the PF page count below.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_pf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_pf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		u32 num_pf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;

		if (dump)
			*(dump_buf + offset) = num_pf_cids;
		valid_conn_pf_cids += num_pf_cids;
	}

	/* Dump section containing number of VF CIDs per connection type */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_vf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_vf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		u32 num_vf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;

		if (dump)
			*(dump_buf + offset) = num_vf_cids;
		valid_conn_vf_cids += num_vf_cids;
	}

	/* Dump section containing physical memory descs for each ILT page */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "size",
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer; in size-only mode just
	 * advance the offset by the space they would occupy.
	 */
	if (dump) {
		u32 page_id;

		for (page_id = 0; page_id < num_pages;
		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
			memcpy(dump_buf + offset,
			       &ilt_pages[page_id],
			       DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
	} else {
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
	}

	/* Round CID totals up to whole CDUC pages */
	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
					   num_cids_per_page);
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
					   num_cids_per_page);

	/* Dump ILT pages IDs */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, true);

	/* Dump ILT pages memory */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, false);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4822 
4823 /***************************** Public Functions *******************************/
4824 
qed_dbg_set_bin_ptr(struct qed_hwfn * p_hwfn,const u8 * const bin_ptr)4825 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
4826 				    const u8 * const bin_ptr)
4827 {
4828 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
4829 	u8 buf_id;
4830 
4831 	/* Convert binary data to debug arrays */
4832 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
4833 		qed_set_dbg_bin_buf(p_hwfn,
4834 				    buf_id,
4835 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
4836 				    buf_hdrs[buf_id].length);
4837 
4838 	return DBG_STATUS_OK;
4839 }
4840 
qed_read_fw_info(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,struct fw_info * fw_info)4841 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
4842 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
4843 {
4844 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4845 	u8 storm_id;
4846 
4847 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4848 		struct storm_defs *storm = &s_storm_defs[storm_id];
4849 
4850 		/* Skip Storm if it's in reset */
4851 		if (dev_data->block_in_reset[storm->sem_block_id])
4852 			continue;
4853 
4854 		/* Read FW info for the current Storm */
4855 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
4856 
4857 		return true;
4858 	}
4859 
4860 	return false;
4861 }
4862 
/* Sets a GRC parameter or applies a preset.
 * For a regular parameter, stores the (range-checked) value directly.
 * For a preset parameter (EXCLUDE_ALL / CRASH), every non-persistent
 * parameter is overwritten with its preset value; disabling a preset
 * (val == 0) is rejected - use qed_dbg_grc_set_params_default instead.
 */
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
				   enum dbg_grc_params grc_param, u32 val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status rc;
	int idx;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure the GRC parameters carry their defaults before the
	 * first explicit configuration.
	 */
	qed_dbg_grc_init_params(p_hwfn);

	/* Validate the parameter id before indexing its definition */
	if (grc_param >= MAX_DBG_GRC_PARAMS)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (!s_grc_param_defs[grc_param].is_preset) {
		/* Regular param - store its value and we're done */
		qed_grc_set_param(p_hwfn, grc_param, val);
		return DBG_STATUS_OK;
	}

	/* Preset param. Disabling a preset is not allowed - call
	 * dbg_grc_set_params_default instead.
	 */
	if (!val)
		return DBG_STATUS_INVALID_ARGS;

	/* Overwrite every non-persistent param with its preset value */
	for (idx = 0; idx < MAX_DBG_GRC_PARAMS; idx++) {
		struct grc_param_defs *defs = &s_grc_param_defs[idx];
		u32 preset;

		if (defs->is_persistent)
			continue;

		if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
			preset = defs->exclude_all_preset_val;
		else if (grc_param == DBG_GRC_PARAM_CRASH)
			preset = defs->crash_preset_val[dev_data->chip_id];
		else
			return DBG_STATUS_INVALID_ARGS;

		qed_grc_set_param(p_hwfn, idx, preset);
	}

	return DBG_STATUS_OK;
}
4925 
4926 /* Assign default GRC param values */
qed_dbg_grc_set_params_default(struct qed_hwfn * p_hwfn)4927 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4928 {
4929 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4930 	u32 i;
4931 
4932 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4933 		if (!s_grc_param_defs[i].is_persistent)
4934 			dev_data->grc.param_val[i] =
4935 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
4936 }
4937 
/* Computes the buffer size (in dwords) required for a GRC dump.
 * Runs the dump in size-only mode (dump=false) without writing data.
 */
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt,
					      u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* All debug arrays the GRC dump relies on must be registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
4958 
/* Performs a GRC dump into the caller's buffer.
 * Fails with DUMP_BUF_TOO_SMALL if the buffer can't hold the full dump.
 * GRC params are reverted to defaults regardless of the dump outcome.
 */
enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 u32 buf_size_in_dwords,
				 u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* GRC Dump */
	rc = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
4987 
/* Computes (and caches) the buffer size required for an idle-check dump.
 * The size is computed once in size-only mode and reused on later calls.
 */
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
	enum dbg_status rc;

	*buf_size = 0;

	rc = qed_dbg_dev_init(p_hwfn);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* All idle-check debug arrays must be registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Size-only pass, cached so it is computed at most once */
	if (!idle_chk->buf_size_set) {
		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
						       p_ptt, NULL, false);
		idle_chk->buf_size_set = true;
	}

	*buf_size = idle_chk->buf_size;

	return DBG_STATUS_OK;
}
5018 
/* Performs an idle-check dump into the caller's buffer.
 * Blocks are taken out of reset first so their registers can be read.
 */
enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Bring blocks out of reset and refresh the cached reset state */
	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Idle Check Dump */
	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5051 
/* Computes the buffer size required for an MCP trace dump (size-only pass) */
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5065 
/* Performs an MCP trace dump into the caller's buffer.
 * A failed NVRAM image read is tolerated: the trace can still be dumped
 * (without formats) from MCP scratchpad data.
 */
enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       u32 buf_size_in_dwords,
				       u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	/* Zero the output count up front so callers never read an
	 * uninitialized value on an early-return path. All sibling
	 * *_dump() wrappers in this file do the same.
	 */
	*num_dumped_dwords = 0;

	status =
		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
						    p_ptt,
						    &needed_buf_size_in_dwords);
	/* NVRAM_GET_IMAGE_FAILED is deliberately not fatal here */
	if (status != DBG_STATUS_OK && status !=
	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Perform dump */
	status = qed_mcp_trace_dump(p_hwfn,
				    p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return status;
}
5098 
/* Computes the buffer size required for a REG FIFO dump (size-only pass) */
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5112 
/* Performs a REG FIFO dump into the caller's buffer */
enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_reg_fifo_dump(p_hwfn,
			       p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5144 
/* Computes the buffer size required for an IGU FIFO dump (size-only pass) */
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
						   u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
5158 
/* Performs an IGU FIFO dump into the caller's buffer */
enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						&required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_igu_fifo_dump(p_hwfn,
			       p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5189 
5190 enum dbg_status
qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u32 * buf_size)5191 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5192 					      struct qed_ptt *p_ptt,
5193 					      u32 *buf_size)
5194 {
5195 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5196 
5197 	*buf_size = 0;
5198 
5199 	if (status != DBG_STATUS_OK)
5200 		return status;
5201 
5202 	return qed_protection_override_dump(p_hwfn,
5203 					    p_ptt, NULL, false, buf_size);
5204 }
5205 
/* Performs a protection-override dump into the caller's buffer */
enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 *dump_buf,
						 u32 buf_size_in_dwords,
						 u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
							   &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	rc = qed_protection_override_dump(p_hwfn,
					  p_ptt,
					  dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return rc;
}
5240 
/* Computes the buffer size required for an FW asserts dump (size-only pass) */
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						     struct qed_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);

	return DBG_STATUS_OK;
}
5259 
/* Performs an FW asserts dump into the caller's buffer */
enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
						  &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5288 
/* Computes the buffer size required for an ILT dump (size-only pass) */
static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						     struct qed_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status rc = qed_dbg_dev_init(p_hwfn);

	*buf_size = 0;

	if (rc != DBG_STATUS_OK)
		return rc;

	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);

	return DBG_STATUS_OK;
}
5304 
/* Performs an ILT dump into the caller's buffer */
static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 required_dwords;
	enum dbg_status rc;

	*num_dumped_dwords = 0;

	rc = qed_dbg_ilt_get_dump_buf_size(p_hwfn, p_ptt, &required_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	if (buf_size_in_dwords < required_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5332 
/* Reads the attention registers of the given block (interrupt or parity,
 * per attn_type) and fills 'results' with one entry per register whose
 * status value is non-zero. When clear_status is set, the read is done
 * through the clear-on-read status address.
 */
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  enum block_id block_id,
				  enum dbg_attn_type attn_type,
				  bool clear_status,
				  struct dbg_attn_block_result *results)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* The attention debug arrays must have been registered */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
					       block_id,
					       attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode: skip registers whose mode expression doesn't
		 * match the current chip/configuration.
		 */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register. Addresses in
		 * the debug arrays are in dwords; convert to bytes for the
		 * register read.
		 */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = qed_rd(p_hwfn,
					      p_ptt,
					      DWORDS_TO_BYTES
					      (reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill the per-block summary of the result structure */
	results->block_id = (u8)block_id;
	results->names_offset =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data,
		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
5405 
5406 /******************************* Data Types **********************************/
5407 
5408 /* REG fifo element */
/* REG fifo element - a single 64-bit entry of the GRC register FIFO,
 * decoded via the SHIFT/MASK pairs below.
 */
struct reg_fifo_element {
	u64 data;
/* Accessed register address (in address units of the FIFO element) */
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
/* Access type - NOTE(review): presumably read vs. write; confirm */
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
/* Error code (decoded to text elsewhere via struct reg_fifo_err) */
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};
5430 
5431 /* REG fifo error element */
/* REG fifo error element - maps a numeric error code from a REG FIFO
 * element to its human-readable description.
 */
struct reg_fifo_err {
	u32 err_code;		/* value of the ERROR field */
	const char *err_msg;	/* matching description string */
};
5436 
5437 /* IGU fifo element */
/* IGU fifo element - a raw entry of the IGU error FIFO. dword0 identifies
 * the command and its issuer; dword1/dword2 hold the command payload,
 * decoded with the shared DWORD12 fields below.
 */
struct igu_fifo_element {
	u32 dword0;
/* Function id of the command issuer */
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
/* Set when the issuer is a PF (as opposed to a VF) */
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
/* Command source, see enum igu_fifo_sources */
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
	u32 dword1;
	u32 dword2;
/* Fields spanning dword1/dword2 (hence "DWORD12") */
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};
5458 
/* Decoded payload of a regular IGU write command */
struct igu_fifo_wr_data {
	u32 data;
/* Producer/consumer value being written */
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
/* Interrupt enable/disable request for the status block */
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
/* Distinguishes a cleanup command (see igu_fifo_cleanup_wr_data) */
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};
5474 
/* Decoded payload of an IGU cleanup write command (CMD_TYPE bit set) */
struct igu_fifo_cleanup_wr_data {
	u32 data;
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
/* Same bit position as IGU_FIFO_WR_DATA_CMD_TYPE - selects this layout */
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};
5486 
5487 /* Protection override element */
/* Protection override element - a single 64-bit entry of the protection
 * override window, decoded via the SHIFT/MASK pairs below.
 */
struct protection_override_element {
	u64 data;
/* Window start address (scaled by PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR) */
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
};
5503 
/* Possible issuers of an IGU FIFO command (value of the DWORD0 SOURCE
 * field): one of the eight PXP clients, the CAU, the attention logic,
 * or the GRC.
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};
5517 
/* Classification of an IGU command address (see struct igu_fifo_addr_data,
 * which maps address ranges to these types).
 */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};
5527 
/* Maps an IGU command address range to its type and description strings */
struct igu_fifo_addr_data {
	u16 start_addr;	/* first address of the range - presumably inclusive */
	u16 end_addr;	/* last address of the range - presumably inclusive */
	char *desc;	/* description used for PF-issued commands */
	char *vf_desc;	/* description used for VF-issued commands */
	enum igu_fifo_addr_types type;
};
5535 
5536 /******************************** Constants **********************************/
5537 
/* Max size in bytes of a single parsed output line (sizes s_temp_buf) */
#define MAX_MSG_LEN				1024

/* MCP Trace module names longer than this are truncated when loaded */
#define MCP_TRACE_MAX_MODULE_LEN		8
#define MCP_TRACE_FORMAT_MAX_PARAMS		3
/* Width in bits of each per-parameter size field in a format descriptor */
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)

#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127

#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5549 
5550 /***************************** Constant Arrays *******************************/
5551 
/* Status string array. Indexed by enum dbg_status - the entry order must
 * match the enum exactly (the per-entry comments name the enum value).
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",
	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
	"No matching framing mode",

	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified"
};
5730 
/* Idle check severity names array. Indexed by the idle check result header's
 * severity field (see its use in qed_parse_idle_chk_dump_rules()).
 */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};
5737 
/* MCP Trace level names array. Indexed by the MCP_TRACE_FORMAT_LEVEL field
 * of a trace format descriptor.
 */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};
5744 
/* Access type names array (0 = read, 1 = write) */
static const char * const s_access_strs[] = {
	"read",
	"write"
};
5750 
/* Privilege type names array */
static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};
5758 
/* Protection type names array. The lower four entries are the default
 * protection values; the upper four are the override values, mirroring the
 * privilege order of s_privilege_strs.
 */
static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};
5770 
/* Master type names array. "???" entries mark unknown/reserved master IDs. */
static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"jdap",
	"???",
	"???",
	"???",
	"???"
};
5790 
/* REG FIFO error messages array, mapping error code values to their
 * human-readable descriptions.
 */
static struct reg_fifo_err s_reg_fifo_errors[] = {
	{1, "grc timeout"},
	{2, "address doesn't belong to any block"},
	{4, "reserved address in block or write to read-only address"},
	{8, "privilege/protection mismatch"},
	{16, "path isolation error"},
	{17, "RSL error"}
};
5800 
/* IGU FIFO sources array, mapping an IGU FIFO element's source ID (see
 * enum igu_fifo_sources) to a printable name.
 */
static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};
5815 
/* IGU FIFO error messages, indexed by the element's error type field */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
5834 
/* IGU FIFO address data: maps each IGU command address range (inclusive on
 * both ends) to a description and an address type. Ranges are listed in
 * ascending order and cover 0x0-0x7ff without gaps.
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
5870 
5871 /******************************** Variables **********************************/
5872 
/* Temporary buffer, used for print size calculations: qed_get_buf_ptr()
 * redirects writes here when the caller passes a NULL results buffer, so
 * sprintf() return values can be accumulated without storing output.
 */
static char s_temp_buf[MAX_MSG_LEN];
5875 
5876 /**************************** Private Functions ******************************/
5877 
/* Advances 'a' by 'b' positions within a cyclic buffer of the given size,
 * wrapping around as needed.
 */
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	u32 sum = a + b;

	return sum % size;
}
5882 
/* Moves 'a' back by 'b' positions within a cyclic buffer of the given size.
 * 'size' is added first so the subtraction never goes negative.
 */
static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	u32 biased = size + a;

	return (biased - b) % size;
}
5887 
5888 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5889  * bytes) and returns them as a dword value. the specified buffer offset is
5890  * updated.
5891  */
qed_read_from_cyclic_buf(void * buf,u32 * offset,u32 buf_size,u8 num_bytes_to_read)5892 static u32 qed_read_from_cyclic_buf(void *buf,
5893 				    u32 *offset,
5894 				    u32 buf_size, u8 num_bytes_to_read)
5895 {
5896 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5897 	u32 val = 0;
5898 
5899 	val_ptr = (u8 *)&val;
5900 
5901 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
5902 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5903 	 */
5904 	for (i = 0; i < num_bytes_to_read; i++) {
5905 		val_ptr[i] = bytes_buf[*offset];
5906 		*offset = qed_cyclic_add(*offset, 1, buf_size);
5907 	}
5908 
5909 	return val;
5910 }
5911 
5912 /* Reads and returns the next byte from the specified buffer.
5913  * The specified buffer offset is updated.
5914  */
qed_read_byte_from_buf(void * buf,u32 * offset)5915 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5916 {
5917 	return ((u8 *)buf)[(*offset)++];
5918 }
5919 
5920 /* Reads and returns the next dword from the specified buffer.
5921  * The specified buffer offset is updated.
5922  */
qed_read_dword_from_buf(void * buf,u32 * offset)5923 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5924 {
5925 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5926 
5927 	*offset += 4;
5928 
5929 	return dword_val;
5930 }
5931 
5932 /* Reads the next string from the specified buffer, and copies it to the
5933  * specified pointer. The specified buffer offset is updated.
5934  */
qed_read_str_from_buf(void * buf,u32 * offset,u32 size,char * dest)5935 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5936 {
5937 	const char *source_str = &((const char *)buf)[*offset];
5938 
5939 	strncpy(dest, source_str, size);
5940 	dest[size - 1] = '\0';
5941 	*offset += size;
5942 }
5943 
5944 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5945  * If the specified buffer in NULL, a temporary buffer pointer is returned.
5946  */
qed_get_buf_ptr(void * buf,u32 offset)5947 static char *qed_get_buf_ptr(void *buf, u32 offset)
5948 {
5949 	return buf ? (char *)buf + offset : s_temp_buf;
5950 }
5951 
5952 /* Reads a param from the specified buffer. Returns the number of dwords read.
5953  * If the returned str_param is NULL, the param is numeric and its value is
5954  * returned in num_param.
5955  * Otheriwise, the param is a string and its pointer is returned in str_param.
5956  */
qed_read_param(u32 * dump_buf,const char ** param_name,const char ** param_str_val,u32 * param_num_val)5957 static u32 qed_read_param(u32 *dump_buf,
5958 			  const char **param_name,
5959 			  const char **param_str_val, u32 *param_num_val)
5960 {
5961 	char *char_buf = (char *)dump_buf;
5962 	size_t offset = 0;
5963 
5964 	/* Extract param name */
5965 	*param_name = char_buf;
5966 	offset += strlen(*param_name) + 1;
5967 
5968 	/* Check param type */
5969 	if (*(char_buf + offset++)) {
5970 		/* String param */
5971 		*param_str_val = char_buf + offset;
5972 		*param_num_val = 0;
5973 		offset += strlen(*param_str_val) + 1;
5974 		if (offset & 0x3)
5975 			offset += (4 - (offset & 0x3));
5976 	} else {
5977 		/* Numeric param */
5978 		*param_str_val = NULL;
5979 		if (offset & 0x3)
5980 			offset += (4 - (offset & 0x3));
5981 		*param_num_val = *(u32 *)(char_buf + offset);
5982 		offset += 4;
5983 	}
5984 
5985 	return (u32)offset / 4;
5986 }
5987 
5988 /* Reads a section header from the specified buffer.
5989  * Returns the number of dwords read.
5990  */
qed_read_section_hdr(u32 * dump_buf,const char ** section_name,u32 * num_section_params)5991 static u32 qed_read_section_hdr(u32 *dump_buf,
5992 				const char **section_name,
5993 				u32 *num_section_params)
5994 {
5995 	const char *param_str_val;
5996 
5997 	return qed_read_param(dump_buf,
5998 			      section_name, &param_str_val, num_section_params);
5999 }
6000 
6001 /* Reads section params from the specified buffer and prints them to the results
6002  * buffer. Returns the number of dwords read.
6003  */
qed_print_section_params(u32 * dump_buf,u32 num_section_params,char * results_buf,u32 * num_chars_printed)6004 static u32 qed_print_section_params(u32 *dump_buf,
6005 				    u32 num_section_params,
6006 				    char *results_buf, u32 *num_chars_printed)
6007 {
6008 	u32 i, dump_offset = 0, results_offset = 0;
6009 
6010 	for (i = 0; i < num_section_params; i++) {
6011 		const char *param_name, *param_str_val;
6012 		u32 param_num_val = 0;
6013 
6014 		dump_offset += qed_read_param(dump_buf + dump_offset,
6015 					      &param_name,
6016 					      &param_str_val, &param_num_val);
6017 
6018 		if (param_str_val)
6019 			results_offset +=
6020 				sprintf(qed_get_buf_ptr(results_buf,
6021 							results_offset),
6022 					"%s: %s\n", param_name, param_str_val);
6023 		else if (strcmp(param_name, "fw-timestamp"))
6024 			results_offset +=
6025 				sprintf(qed_get_buf_ptr(results_buf,
6026 							results_offset),
6027 					"%s: %d\n", param_name, param_num_val);
6028 	}
6029 
6030 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6031 				  "\n");
6032 
6033 	*num_chars_printed = results_offset;
6034 
6035 	return dump_offset;
6036 }
6037 
6038 /* Returns the block name that matches the specified block ID,
6039  * or NULL if not found.
6040  */
qed_dbg_get_block_name(struct qed_hwfn * p_hwfn,enum block_id block_id)6041 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6042 					  enum block_id block_id)
6043 {
6044 	const struct dbg_block_user *block =
6045 	    (const struct dbg_block_user *)
6046 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6047 
6048 	return (const char *)block->name;
6049 }
6050 
qed_dbg_get_user_data(struct qed_hwfn * p_hwfn)6051 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6052 							 *p_hwfn)
6053 {
6054 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6055 }
6056 
/* Parses the idle check rules and returns the number of characters printed.
 * In case of parsing error, returns 0.
 *
 * Each dumped rule consists of a result header, followed by per-register
 * result headers and register values. The rule's parsing strings (messages
 * and register names) are concatenated NUL-terminated strings located in the
 * BIN_BUF_DBG_PARSING_STRINGS array at the offset taken from the rule's
 * parsing data.
 */
static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	u32 rule_idx;
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;
		bool has_fw_msg;
		u8 curr_reg_id;

		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		rule_parsing_data =
		    (const struct dbg_idle_chk_rule_parsing_data *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
		    hdr->rule_id;
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		/* First string is the LSI message; if has_fw_msg, a FW
		 * message follows it, then the register name strings.
		 */
		parsing_str = (const char *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
		    parsing_str_offset;
		lsi_msg = parsing_str;
		curr_reg_id = 0;

		/* Bail out on a malformed severity - it also indexes
		 * s_idle_chk_severity_str below.
		 */
		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message: the FW message (if present and
		 * requested), otherwise the LSI message.
		 */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool is_mem;
			u8 reg_id;

			reg_hdr =
				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			for (; reg_id > curr_reg_id;
			     curr_reg_id++,
			     parsing_str += strlen(parsing_str) + 1);

			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			/* For memory entries, print the entry index too */
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			/* Print the dumped register value dwords, comma
			 * separated.
			 */
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;

	return results_offset;
}
6197 
/* Parses an idle check dump buffer.
 * If result_buf is not NULL, the idle check results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 *
 * The dump consists of a "global_params" section followed by an "idle_chk"
 * section whose single param ("num_rules") gives the number of dumped rules.
 * The rules are parsed twice: once printing FW messages, once printing LSI
 * messages.
 */
static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
	u32 num_section_params = 0, num_rules;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;

	/* Parsing requires the binary debug arrays to have been loaded */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	if (num_rules) {
		u32 rules_print_size;

		/* Print FW output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output - same rules, LSI messages this time */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	else if (*num_warnings)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	else
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6318 
6319 /* Allocates and fills MCP Trace meta data based on the specified meta data
6320  * dump buffer.
6321  * Returns debug status code.
6322  */
6323 static enum dbg_status
qed_mcp_trace_alloc_meta_data(struct qed_hwfn * p_hwfn,const u32 * meta_buf)6324 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6325 			      const u32 *meta_buf)
6326 {
6327 	struct dbg_tools_user_data *dev_user_data;
6328 	u32 offset = 0, signature, i;
6329 	struct mcp_trace_meta *meta;
6330 	u8 *meta_buf_bytes;
6331 
6332 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6333 	meta = &dev_user_data->mcp_trace_meta;
6334 	meta_buf_bytes = (u8 *)meta_buf;
6335 
6336 	/* Free the previous meta before loading a new one. */
6337 	if (meta->is_allocated)
6338 		qed_mcp_trace_free_meta_data(p_hwfn);
6339 
6340 	memset(meta, 0, sizeof(*meta));
6341 
6342 	/* Read first signature */
6343 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6344 	if (signature != NVM_MAGIC_VALUE)
6345 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6346 
6347 	/* Read no. of modules and allocate memory for their pointers */
6348 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6349 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6350 				GFP_KERNEL);
6351 	if (!meta->modules)
6352 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6353 
6354 	/* Allocate and read all module strings */
6355 	for (i = 0; i < meta->modules_num; i++) {
6356 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6357 
6358 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6359 		if (!(*(meta->modules + i))) {
6360 			/* Update number of modules to be released */
6361 			meta->modules_num = i ? i - 1 : 0;
6362 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6363 		}
6364 
6365 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6366 				      *(meta->modules + i));
6367 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6368 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6369 	}
6370 
6371 	/* Read second signature */
6372 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6373 	if (signature != NVM_MAGIC_VALUE)
6374 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6375 
6376 	/* Read number of formats and allocate memory for all formats */
6377 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6378 	meta->formats = kcalloc(meta->formats_num,
6379 				sizeof(struct mcp_trace_format),
6380 				GFP_KERNEL);
6381 	if (!meta->formats)
6382 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6383 
6384 	/* Allocate and read all strings */
6385 	for (i = 0; i < meta->formats_num; i++) {
6386 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6387 		u8 format_len;
6388 
6389 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6390 							   &offset);
6391 		format_len = GET_MFW_FIELD(format_ptr->data,
6392 					   MCP_TRACE_FORMAT_LEN);
6393 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6394 		if (!format_ptr->format_str) {
6395 			/* Update number of modules to be released */
6396 			meta->formats_num = i ? i - 1 : 0;
6397 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6398 		}
6399 
6400 		qed_read_str_from_buf(meta_buf_bytes,
6401 				      &offset,
6402 				      format_len, format_ptr->format_str);
6403 	}
6404 
6405 	meta->is_allocated = true;
6406 	return DBG_STATUS_OK;
6407 }
6408 
6409 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6410  * are printed to it. The parsing status is returned.
6411  * Arguments:
6412  * trace_buf - MCP trace cyclic buffer
6413  * trace_buf_size - MCP trace cyclic buffer size in bytes
6414  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6415  *		 buffer.
6416  * data_size - size in bytes of data to parse.
6417  * parsed_buf - destination buffer for parsed data.
6418  * parsed_results_bytes - size of parsed data in bytes.
6419  */
static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
					       u8 *trace_buf,
					       u32 trace_buf_size,
					       u32 data_offset,
					       u32 data_size,
					       char *parsed_buf,
					       u32 *parsed_results_bytes)
{
	struct dbg_tools_user_data *dev_user_data;
	struct mcp_trace_meta *meta;
	u32 param_mask, param_shift;
	enum dbg_status status;

	dev_user_data = qed_dbg_get_user_data(p_hwfn);
	meta = &dev_user_data->mcp_trace_meta;
	*parsed_results_bytes = 0;

	/* Parsing requires previously-loaded meta data (formats/modules) */
	if (!meta->is_allocated)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	status = DBG_STATUS_OK;

	while (data_size) {
		struct mcp_trace_format *format_ptr;
		u8 format_level, format_module;
		u32 params[3] = { 0, 0, 0 };
		u32 header, format_idx, i;

		if (data_size < MFW_TRACE_ENTRY_SIZE)
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		header = qed_read_from_cyclic_buf(trace_buf,
						  &data_offset,
						  trace_buf_size,
						  MFW_TRACE_ENTRY_SIZE);
		data_size -= MFW_TRACE_ENTRY_SIZE;
		format_idx = header & MFW_TRACE_EVENTID_MASK;

		/* Skip message if its index doesn't exist in the meta data */
		if (format_idx >= meta->formats_num) {
			u8 format_size = (u8)GET_MFW_FIELD(header,
							   MFW_TRACE_PRM_SIZE);

			if (data_size < format_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			data_offset = qed_cyclic_add(data_offset,
						     format_size,
						     trace_buf_size);
			data_size -= format_size;
			continue;
		}

		format_ptr = &meta->formats[format_idx];

		/* Extract up to 3 message parameters from the cyclic buffer */
		for (i = 0,
		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
			/* Extract param size (0..3) */
			u8 param_size = (u8)((format_ptr->data & param_mask) >>
					     param_shift);

			/* If the param size is zero, there are no other
			 * parameters.
			 */
			if (!param_size)
				break;

			/* Size is encoded using 2 bits, where 3 is used to
			 * encode 4.
			 */
			if (param_size == 3)
				param_size = 4;

			if (data_size < param_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			params[i] = qed_read_from_cyclic_buf(trace_buf,
							     &data_offset,
							     trace_buf_size,
							     param_size);
			data_size -= param_size;
		}

		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
						 MCP_TRACE_FORMAT_LEVEL);
		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
						  MCP_TRACE_FORMAT_MODULE);
		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Reject module indices beyond the loaded meta data. Without
		 * this check, inconsistent meta data leads to an
		 * out-of-bounds read of meta->modules below.
		 */
		if (format_module >= meta->modules_num)
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Print current message to results buffer */
		*parsed_results_bytes +=
			sprintf(qed_get_buf_ptr(parsed_buf,
						*parsed_results_bytes),
				"%s %-8s: ",
				s_mcp_trace_level_str[format_level],
				meta->modules[format_module]);
		*parsed_results_bytes +=
		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
			    format_ptr->format_str,
			    params[0], params[1], params[2]);
	}

	/* Add string NULL terminator */
	(*parsed_results_bytes)++;

	return status;
}
6532 
6533 /* Parses an MCP Trace dump buffer.
6534  * If result_buf is not NULL, the MCP Trace results are printed to it.
6535  * In any case, the required results buffer size is assigned to
6536  * parsed_results_bytes.
6537  * The parsing status is returned.
6538  */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *dump_buf,
						char *results_buf,
						u32 *parsed_results_bytes,
						bool free_meta_data)
{
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params (also initializes results_offset) */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section; its single "size" param holds the
	 * trace data length in dwords.
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info: an mcp_trace header, validated by signature,
	 * is followed by the cyclic trace buffer bytes.
	 */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	/* Valid data is the cyclic distance from oldest to producer */
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data - fall back to the
		 * user-provided meta buffer, if one was registered.
		 */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* Parse the trace bytes; a NULL parsed buffer only computes size */
	status = qed_parse_mcp_trace_buf(p_hwfn,
					 trace_buf,
					 trace->size,
					 offset,
					 data_size,
					 results_buf ?
					 results_buf + results_offset :
					 NULL,
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Callers doing repeated parses keep the meta data allocated */
	if (free_meta_data)
		qed_mcp_trace_free_meta_data(p_hwfn);

	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
}
6638 
6639 /* Parses a Reg FIFO dump buffer.
6640  * If result_buf is not NULL, the Reg FIFO results are printed to it.
6641  * In any case, the required results buffer size is assigned to
6642  * parsed_results_bytes.
6643  * The parsing status is returned.
6644  */
/* Parses a Reg FIFO dump buffer into results_buf (size-only when NULL).
 * Returns DBG_STATUS_OK, or DBG_STATUS_REG_FIFO_BAD_DATA on a malformed dump.
 */
static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	const char *section_name, *param_name, *param_str_val;
	u32 param_num_val, num_section_params, num_elements;
	struct reg_fifo_element *elements;
	u8 j, err_code, vf_val;
	u32 results_offset = 0;
	char vf_str[4];
	u32 i;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read reg_fifo_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "reg_fifo_data"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
	elements = (struct reg_fifo_element *)dump_buf;

	/* Decode elements. Note: the loop counter must be as wide as
	 * num_elements (u32) - a u8 counter would wrap and loop forever
	 * on dumps with 256 or more elements.
	 */
	for (i = 0; i < num_elements; i++) {
		const char *err_msg = NULL;

		/* Discover if element belongs to a VF or a PF */
		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
			sprintf(vf_str, "%s", "N/A");
		else
			sprintf(vf_str, "%d", vf_val);

		/* Find error message */
		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
			if (err_code == s_reg_fifo_errors[j].err_code)
				err_msg = s_reg_fifo_errors[j].err_msg;

		/* Add parsed element to parsed buffer */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
			    elements[i].data,
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_ADDRESS) *
			    REG_FIFO_ELEMENT_ADDR_FACTOR,
			    s_access_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_ACCESS)],
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_PF),
			    vf_str,
			    (u32)GET_FIELD(elements[i].data,
					   REG_FIFO_ELEMENT_PORT),
			    s_privilege_strs[GET_FIELD(elements[i].data,
						REG_FIFO_ELEMENT_PRIVILEGE)],
			    s_protection_strs[GET_FIELD(elements[i].data,
						REG_FIFO_ELEMENT_PROTECTION)],
			    s_master_strs[GET_FIELD(elements[i].data,
						    REG_FIFO_ELEMENT_MASTER)],
			    err_msg ? err_msg : "unknown error code");
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6732 
/* Parses a single IGU FIFO element and appends its textual form to
 * results_buf at *results_offset (which is advanced).
 * Returns DBG_STATUS_OK, or DBG_STATUS_IGU_FIFO_BAD_DATA when the element's
 * source, error type or command address is outside the known tables.
 */
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element, char
						  *results_buf,
						  u32 *results_offset)
{
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;
	u16 cmd_addr;
	u64 dword12;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	 * FIFO element.
	 */
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* Bound-check indices before using them with the string tables */
	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data (first range containing cmd_addr) */
	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;
	}

	if (!found_addr)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
		break;
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		break;
	default:
		parsed_addr_data[0] = '\0';
	}

	/* Read commands carry no write data to decode */
	if (!is_wr_cmd) {
		parsed_wr_data[0] = '\0';
		goto out;
	}

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
	} else {
		if (is_cleanup) {
			u8 cleanup_val, cleanup_type;

			cleanup_val =
				GET_FIELD(wr_data,
					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
			cleanup_type =
			    GET_FIELD(wr_data,
				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
				cleanup_type);
		} else {
			u8 update_flag, en_dis_int_for_sb, segment;
			u8 timer_mask;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
			en_dis_int_for_sb =
				GET_FIELD(wr_data,
					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				prod_cons,
				update_flag ? "update" : "nop",
				en_dis_int_for_sb ?
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				"enable",
				segment ? "attn" : "regular",
				timer_mask);
		}
	}
out:
	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
						   *results_offset),
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   element->dword0,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   cmd_addr,
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   : found_addr->desc,
				   parsed_addr_data,
				   parsed_wr_data,
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
}
6863 
6864 /* Parses an IGU FIFO dump buffer.
6865  * If result_buf is not NULL, the IGU FIFO results are printed to it.
6866  * In any case, the required results buffer size is assigned to
6867  * parsed_results_bytes.
6868  * The parsing status is returned.
6869  */
/* Parses an IGU FIFO dump buffer into results_buf (size-only when NULL).
 * Returns DBG_STATUS_OK, a bad-data status on a malformed dump, or the
 * status of the first element that fails to parse.
 */
static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
					       char *results_buf,
					       u32 *parsed_results_bytes)
{
	const char *section_name, *param_name, *param_str_val;
	u32 param_num_val, num_section_params, num_elements;
	struct igu_fifo_element *elements;
	enum dbg_status status;
	u32 results_offset = 0;
	u32 i;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read igu_fifo_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "igu_fifo_data"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
	elements = (struct igu_fifo_element *)dump_buf;

	/* Decode elements. The loop counter must match num_elements' width
	 * (u32) - a u8 counter would wrap and never terminate on dumps
	 * with 256 or more elements.
	 */
	for (i = 0; i < num_elements; i++) {
		status = qed_parse_igu_fifo_element(&elements[i],
						    results_buf,
						    &results_offset);
		if (status != DBG_STATUS_OK)
			return status;
	}

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
						  results_offset),
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6924 
6925 static enum dbg_status
qed_parse_protection_override_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)6926 qed_parse_protection_override_dump(u32 *dump_buf,
6927 				   char *results_buf,
6928 				   u32 *parsed_results_bytes)
6929 {
6930 	const char *section_name, *param_name, *param_str_val;
6931 	u32 param_num_val, num_section_params, num_elements;
6932 	struct protection_override_element *elements;
6933 	u32 results_offset = 0;
6934 	u8 i;
6935 
6936 	/* Read global_params section */
6937 	dump_buf += qed_read_section_hdr(dump_buf,
6938 					 &section_name, &num_section_params);
6939 	if (strcmp(section_name, "global_params"))
6940 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6941 
6942 	/* Print global params */
6943 	dump_buf += qed_print_section_params(dump_buf,
6944 					     num_section_params,
6945 					     results_buf, &results_offset);
6946 
6947 	/* Read protection_override_data section */
6948 	dump_buf += qed_read_section_hdr(dump_buf,
6949 					 &section_name, &num_section_params);
6950 	if (strcmp(section_name, "protection_override_data"))
6951 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6952 	dump_buf += qed_read_param(dump_buf,
6953 				   &param_name, &param_str_val, &param_num_val);
6954 	if (strcmp(param_name, "size"))
6955 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6956 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
6957 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6958 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6959 	elements = (struct protection_override_element *)dump_buf;
6960 
6961 	/* Decode elements */
6962 	for (i = 0; i < num_elements; i++) {
6963 		u32 address = GET_FIELD(elements[i].data,
6964 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6965 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6966 
6967 		results_offset +=
6968 		    sprintf(qed_get_buf_ptr(results_buf,
6969 					    results_offset),
6970 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6971 			    i, address,
6972 			    (u32)GET_FIELD(elements[i].data,
6973 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6974 			    (u32)GET_FIELD(elements[i].data,
6975 				      PROTECTION_OVERRIDE_ELEMENT_READ),
6976 			    (u32)GET_FIELD(elements[i].data,
6977 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
6978 			    s_protection_strs[GET_FIELD(elements[i].data,
6979 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6980 			    s_protection_strs[GET_FIELD(elements[i].data,
6981 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6982 	}
6983 
6984 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6985 						  results_offset),
6986 				  "protection override contained %d elements",
6987 				  num_elements);
6988 
6989 	/* Add 1 for string NULL termination */
6990 	*parsed_results_bytes = results_offset + 1;
6991 
6992 	return DBG_STATUS_OK;
6993 }
6994 
6995 /* Parses a FW Asserts dump buffer.
6996  * If result_buf is not NULL, the FW Asserts results are printed to it.
6997  * In any case, the required results buffer size is assigned to
6998  * parsed_results_bytes.
6999  * The parsing status is returned.
7000  */
static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
						 char *results_buf,
						 u32 *parsed_results_bytes)
{
	u32 num_section_params, param_num_val, i, results_offset = 0;
	const char *param_name, *param_str_val, *section_name;
	bool last_section_found = false;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Consume per-storm "fw_asserts" sections until the "last" section
	 * marker is reached; any other section name is a parse failure.
	 */
	while (!last_section_found) {
		dump_buf += qed_read_section_hdr(dump_buf,
						 &section_name,
						 &num_section_params);
		if (!strcmp(section_name, "fw_asserts")) {
			/* Extract params: the storm letter and the dump size
			 * (in dwords) are both mandatory.
			 */
			const char *storm_letter = NULL;
			u32 storm_dump_size = 0;

			for (i = 0; i < num_section_params; i++) {
				dump_buf += qed_read_param(dump_buf,
							   &param_name,
							   &param_str_val,
							   &param_num_val);
				if (!strcmp(param_name, "storm"))
					storm_letter = param_str_val;
				else if (!strcmp(param_name, "size"))
					storm_dump_size = param_num_val;
				else
					return
					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
			}

			if (!storm_letter || !storm_dump_size)
				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

			/* Print data: a header line, then one hex dword per
			 * line.
			 */
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "\n%sSTORM_ASSERT: size=%d\n",
				    storm_letter, storm_dump_size);
			for (i = 0; i < storm_dump_size; i++, dump_buf++)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "%08x\n", *dump_buf);
		} else if (!strcmp(section_name, "last")) {
			last_section_found = true;
		} else {
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
7071 
7072 /***************************** Public Functions *******************************/
7073 
qed_dbg_user_set_bin_ptr(struct qed_hwfn * p_hwfn,const u8 * const bin_ptr)7074 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7075 					 const u8 * const bin_ptr)
7076 {
7077 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7078 	u8 buf_id;
7079 
7080 	/* Convert binary data to debug arrays */
7081 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7082 		qed_set_dbg_bin_buf(p_hwfn,
7083 				    (enum bin_dbg_buffer_type)buf_id,
7084 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7085 				    buf_hdrs[buf_id].length);
7086 
7087 	return DBG_STATUS_OK;
7088 }
7089 
qed_dbg_alloc_user_data(struct qed_hwfn * p_hwfn,void ** user_data_ptr)7090 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7091 					void **user_data_ptr)
7092 {
7093 	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7094 				 GFP_KERNEL);
7095 	if (!(*user_data_ptr))
7096 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7097 
7098 	return DBG_STATUS_OK;
7099 }
7100 
qed_dbg_get_status_str(enum dbg_status status)7101 const char *qed_dbg_get_status_str(enum dbg_status status)
7102 {
7103 	return (status <
7104 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7105 }
7106 
enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	u32 num_errors, num_warnings;

	/* A NULL results buffer turns the parse into a dry run that only
	 * reports the required buffer size.
	 */
	return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords,
				       NULL, results_buf_size,
				       &num_errors, &num_warnings);
}
7121 
enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf,
					   u32 *num_errors,
					   u32 *num_warnings)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords,
				       results_buf, &needed_bytes,
				       num_errors, num_warnings);
}
7138 
qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn * p_hwfn,const u32 * meta_buf)7139 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7140 				     const u32 *meta_buf)
7141 {
7142 	struct dbg_tools_user_data *dev_user_data =
7143 		qed_dbg_get_user_data(p_hwfn);
7144 
7145 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7146 }
7147 
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
						   u32 *dump_buf,
						   u32 num_dumped_dwords,
						   u32 *results_buf_size)
{
	/* NULL results buffer => size-only parse; meta data is freed */
	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, NULL,
					results_buf_size, true);
}
7156 
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
					    u32 *dump_buf,
					    u32 num_dumped_dwords,
					    char *results_buf)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
					&needed_bytes, true);
}
7168 
enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
						 u32 *dump_buf,
						 char *results_buf)
{
	u32 needed_bytes;

	/* Keep the meta data allocated so subsequent calls can reuse it */
	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
					&needed_bytes, false);
}
7178 
enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
					 u8 *dump_buf,
					 u32 num_dumped_bytes,
					 char *results_buf)
{
	u32 parsed_bytes;

	/* Parse the entire buffer, starting at offset 0 */
	return qed_parse_mcp_trace_buf(p_hwfn, dump_buf, num_dumped_bytes, 0,
				       num_dumped_bytes, results_buf,
				       &parsed_bytes);
}
7193 
7194 /* Frees the specified MCP Trace meta data */
qed_mcp_trace_free_meta_data(struct qed_hwfn * p_hwfn)7195 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7196 {
7197 	struct dbg_tools_user_data *dev_user_data;
7198 	struct mcp_trace_meta *meta;
7199 	u32 i;
7200 
7201 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7202 	meta = &dev_user_data->mcp_trace_meta;
7203 	if (!meta->is_allocated)
7204 		return;
7205 
7206 	/* Release modules */
7207 	if (meta->modules) {
7208 		for (i = 0; i < meta->modules_num; i++)
7209 			kfree(meta->modules[i]);
7210 		kfree(meta->modules);
7211 	}
7212 
7213 	/* Release formats */
7214 	if (meta->formats) {
7215 		for (i = 0; i < meta->formats_num; i++)
7216 			kfree(meta->formats[i].format_str);
7217 		kfree(meta->formats);
7218 	}
7219 
7220 	meta->is_allocated = false;
7221 }
7222 
enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	/* NULL results buffer => compute the required size only */
	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
}
7230 
enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &needed_bytes);
}
7240 
enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords,
						  u32 *results_buf_size)
{
	/* NULL results buffer => compute the required size only */
	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
}
7248 
enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf,
					   u32 num_dumped_dwords,
					   char *results_buf)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &needed_bytes);
}
7258 
7259 enum dbg_status
qed_get_protection_override_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7260 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7261 					     u32 *dump_buf,
7262 					     u32 num_dumped_dwords,
7263 					     u32 *results_buf_size)
7264 {
7265 	return qed_parse_protection_override_dump(dump_buf,
7266 						  NULL, results_buf_size);
7267 }
7268 
enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
						      u32 *dump_buf,
						      u32 num_dumped_dwords,
						      char *results_buf)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_protection_override_dump(dump_buf, results_buf,
						  &needed_bytes);
}
7280 
enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
						    u32 *dump_buf,
						    u32 num_dumped_dwords,
						    u32 *results_buf_size)
{
	/* NULL results buffer => compute the required size only */
	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
}
7288 
enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
					     u32 *dump_buf,
					     u32 num_dumped_dwords,
					     char *results_buf)
{
	/* The parsed size is computed but not reported to the caller */
	u32 needed_bytes;

	return qed_parse_fw_asserts_dump(dump_buf, results_buf,
					 &needed_bytes);
}
7299 
/* Parse and print attention (interrupt/parity) results for a single block.
 *
 * @p_hwfn:  HW function the attention data belongs to.
 * @results: raw attention results collected for one HW block.
 *
 * Walks every register with attention data, maps each set status bit to its
 * attention name using the debug arrays from the firmware binary, and prints
 * one DP_NOTICE line per asserted bit.
 *
 * Return: DBG_STATUS_OK on success, DBG_STATUS_INVALID_ARGS for an unknown
 * block id, or DBG_STATUS_DBG_ARRAY_NOT_SET if the debug arrays were not
 * loaded.
 */
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
				   struct dbg_attn_block_result *results)
{
	const u32 *block_attn_name_offsets;
	const char *attn_name_base;
	const char *block_name;
	enum dbg_attn_type attn_type;
	u8 num_regs, i, j;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
	if (!block_name)
		return DBG_STATUS_INVALID_ARGS;

	/* All three debug arrays are required to translate bits to names */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	block_attn_name_offsets =
	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
	    results->names_offset;

	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;

	/* Go over registers with a non-zero attention status */
	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_bit_mapping *bit_mapping;
		struct dbg_attn_reg_result *reg_result;
		u8 num_reg_attn, bit_idx = 0;

		reg_result = &results->reg_results[i];
		num_reg_attn = GET_FIELD(reg_result->data,
					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
		bit_mapping = (struct dbg_attn_bit_mapping *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
		    reg_result->block_attn_offset;

		/* Go over attention status bits */
		for (j = 0; j < num_reg_attn; j++, bit_idx++) {
			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
						     DBG_ATTN_BIT_MAPPING_VAL);
			const char *attn_name, *attn_type_str, *masked_str;
			u32 attn_name_offset;
			u32 sts_addr;

			/* Check if bit mask should be advanced (due to unused
			 * bits).
			 */
			if (GET_FIELD(bit_mapping[j].data,
				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
				bit_idx += (u8)attn_idx_val;
				continue;
			}

			/* Check current bit index */
			if (!(reg_result->sts_val & BIT(bit_idx)))
				continue;

			/* An attention bit with value=1 was found
			 * Find attention name
			 */
			attn_name_offset =
				block_attn_name_offsets[attn_idx_val];
			attn_name = attn_name_base + attn_name_offset;
			attn_type_str =
				(attn_type ==
				 ATTN_TYPE_INTERRUPT ? "Interrupt" :
				 "Parity");
			masked_str = reg_result->mask_val & BIT(bit_idx) ?
				     " [masked]" : "";
			sts_addr = GET_FIELD(reg_result->data,
					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
			/* sts_addr is in dwords; print a byte address */
			DP_NOTICE(p_hwfn,
				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
				  block_name, attn_type_str, attn_name,
				  sts_addr * 4, bit_idx, masked_str);
		}
	}

	return DBG_STATUS_OK;
}
7383 
7384 static DEFINE_MUTEX(qed_dbg_lock);
7385 
7386 /* Wrapper for unifying the idle_chk and mcp_trace api */
7387 static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7388 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7389 				   u32 *dump_buf,
7390 				   u32 num_dumped_dwords,
7391 				   char *results_buf)
7392 {
7393 	u32 num_errors, num_warnnings;
7394 
7395 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7396 					  results_buf, &num_errors,
7397 					  &num_warnnings);
7398 }
7399 
7400 /* Feature meta data lookup table */
7401 static struct {
7402 	char *name;
7403 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7404 				    struct qed_ptt *p_ptt, u32 *size);
7405 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7406 					struct qed_ptt *p_ptt, u32 *dump_buf,
7407 					u32 buf_size, u32 *dumped_dwords);
7408 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7409 					 u32 *dump_buf, u32 num_dumped_dwords,
7410 					 char *results_buf);
7411 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7412 					    u32 *dump_buf,
7413 					    u32 num_dumped_dwords,
7414 					    u32 *results_buf_size);
7415 } qed_features_lookup[] = {
7416 	{
7417 	"grc", qed_dbg_grc_get_dump_buf_size,
7418 		    qed_dbg_grc_dump, NULL, NULL}, {
7419 	"idle_chk",
7420 		    qed_dbg_idle_chk_get_dump_buf_size,
7421 		    qed_dbg_idle_chk_dump,
7422 		    qed_print_idle_chk_results_wrapper,
7423 		    qed_get_idle_chk_results_buf_size}, {
7424 	"mcp_trace",
7425 		    qed_dbg_mcp_trace_get_dump_buf_size,
7426 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7427 		    qed_get_mcp_trace_results_buf_size}, {
7428 	"reg_fifo",
7429 		    qed_dbg_reg_fifo_get_dump_buf_size,
7430 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7431 		    qed_get_reg_fifo_results_buf_size}, {
7432 	"igu_fifo",
7433 		    qed_dbg_igu_fifo_get_dump_buf_size,
7434 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7435 		    qed_get_igu_fifo_results_buf_size}, {
7436 	"protection_override",
7437 		    qed_dbg_protection_override_get_dump_buf_size,
7438 		    qed_dbg_protection_override_dump,
7439 		    qed_print_protection_override_results,
7440 		    qed_get_protection_override_results_buf_size}, {
7441 	"fw_asserts",
7442 		    qed_dbg_fw_asserts_get_dump_buf_size,
7443 		    qed_dbg_fw_asserts_dump,
7444 		    qed_print_fw_asserts_results,
7445 		    qed_get_fw_asserts_results_buf_size}, {
7446 	"ilt",
7447 		    qed_dbg_ilt_get_dump_buf_size,
7448 		    qed_dbg_ilt_dump, NULL, NULL},};
7449 
/* Print a formatted (text) feature buffer to the kernel log, at most 80
 * characters per continuation chunk.
 */
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
	u32 chunk = 80;
	u32 pos;

	if (!p_text_buf)
		return;

	pr_notice("\n%.*s", chunk, p_text_buf);
	for (pos = chunk; pos < text_size; pos += chunk)
		pr_cont("%.*s", chunk, p_text_buf + pos);
	pr_cont("\n");
}
7462 
7463 #define QED_RESULTS_BUF_MIN_SIZE 16
7464 /* Generic function for decoding debug feature info */
/* Format a collected binary feature dump into printable text.
 *
 * On success the feature's dump_buf is replaced with the newly allocated
 * text buffer (the binary buffer is freed), unless a binary dump was
 * requested via cdev->dbg_bin_dump, in which case the binary buffer is kept.
 *
 * Return: DBG_STATUS_OK, or an error status from sizing, allocation or
 * parsing.
 */
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
				      enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 text_size_bytes, null_char_pos, i;
	enum dbg_status rc;
	char *text_buf;

	/* Check if feature supports formatting capability */
	if (!qed_features_lookup[feature_idx].results_buf_size)
		return DBG_STATUS_OK;

	/* Obtain size of formatted output */
	rc = qed_features_lookup[feature_idx].
		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
				 feature->dumped_dwords, &text_size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
	null_char_pos = text_size_bytes - 1;
	text_size_bytes = (text_size_bytes + 3) & ~0x3;

	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
		DP_NOTICE(p_hwfn->cdev,
			  "formatted size of feature was too small %d. Aborting\n",
			  text_size_bytes);
		return DBG_STATUS_INVALID_ARGS;
	}

	/* Allocate temp text buf */
	text_buf = vzalloc(text_size_bytes);
	if (!text_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Decode feature opcodes to string on temp buf */
	rc = qed_features_lookup[feature_idx].
		print_results(p_hwfn, (u32 *)feature->dump_buf,
			      feature->dumped_dwords, text_buf);
	if (rc != DBG_STATUS_OK) {
		vfree(text_buf);
		return rc;
	}

	/* Replace the original null character with a '\n' character.
	 * The bytes that were added as a result of the dword alignment are also
	 * padded with '\n' characters.
	 */
	for (i = null_char_pos; i < text_size_bytes; i++)
		text_buf[i] = '\n';

	/* Dump printable feature to log */
	if (p_hwfn->cdev->print_dbg_data)
		qed_dbg_print_feature(text_buf, text_size_bytes);

	/* Just return the original binary buffer if requested */
	if (p_hwfn->cdev->dbg_bin_dump) {
		vfree(text_buf);
		return DBG_STATUS_OK;
	}

	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer.
	 */
	vfree(feature->dump_buf);
	feature->dump_buf = text_buf;
	feature->buf_size = text_size_bytes;
	feature->dumped_dwords = text_size_bytes / 4;
	return rc;
}
7536 
7537 #define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF
7538 
7539 /* Generic function for performing the dump of a debug feature. */
/* Generic function for performing the dump of a debug feature.
 *
 * Frees any stale buffer from a previous dump, queries the required size,
 * allocates the buffer, performs the dump and finally formats the result
 * via format_feature().
 *
 * Return: DBG_STATUS_OK on success (including the MFW-unresponsive case,
 * where raw binary data is still provided), or an error status.
 */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 buf_size_dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		vfree(feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump.
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;

	/* Oversized features are skipped (reported as success, size 0) so
	 * the remaining features can still be collected.
	 */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
		feature->buf_size = 0;
		DP_NOTICE(p_hwfn->cdev,
			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
			  qed_features_lookup[feature_idx].name,
			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);

		return DBG_STATUS_OK;
	}

	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = vmalloc(feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_features_lookup[feature_idx].
		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
			     feature->buf_size / sizeof(u32),
			     &feature->dumped_dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we wont able
	 * to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). skip the formatting but return
	 * success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}
7605 
/* Collect a GRC dump into @buffer; bytes written returned via
 * @num_dumped_bytes. Returns 0 on success or a negative errno.
 */
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC,
			       num_dumped_bytes);
}
7610 
qed_dbg_grc_size(struct qed_dev * cdev)7611 int qed_dbg_grc_size(struct qed_dev *cdev)
7612 {
7613 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7614 }
7615 
/* Collect an idle-check dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_IDLE_CHK, num_dumped_bytes);
}
7621 
qed_dbg_idle_chk_size(struct qed_dev * cdev)7622 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7623 {
7624 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7625 }
7626 
/* Collect a register FIFO dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_REG_FIFO, num_dumped_bytes);
}
7632 
qed_dbg_reg_fifo_size(struct qed_dev * cdev)7633 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7634 {
7635 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7636 }
7637 
/* Collect an IGU FIFO dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_IGU_FIFO, num_dumped_bytes);
}
7643 
qed_dbg_igu_fifo_size(struct qed_dev * cdev)7644 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7645 {
7646 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7647 }
7648 
/* Query the length (in bytes) of NVM image @image_id from the MFW.
 * On failure *length is left as 0 and the MCP error code is returned.
 */
static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
				    enum qed_nvm_images image_id, u32 *length)
{
	struct qed_nvm_image_att att;
	int rc;

	*length = 0;

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &att);
	if (!rc)
		*length = att.length;

	return rc;
}
7664 
/* Read NVM image @image_id into @buffer and report its (dword-rounded)
 * length via @num_dumped_bytes.
 *
 * All images except NVM_META are byte-swapped to big-endian dwords after
 * reading. Returns 0 on success or a negative errno from the MCP calls.
 * The caller must provide a buffer large enough for the rounded length.
 */
static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
			     u32 *num_dumped_bytes,
			     enum qed_nvm_images image_id)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 len_rounded;
	int rc;

	*num_dumped_bytes = 0;
	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
	if (rc)
		return rc;

	DP_NOTICE(p_hwfn->cdev,
		  "Collecting a debug feature [\"nvram image %d\"]\n",
		  image_id);

	/* Round up so the swap below works on whole dwords */
	len_rounded = roundup(len_rounded, sizeof(u32));
	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
	if (rc)
		return rc;

	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		cpu_to_be32_array((__force __be32 *)buffer,
				  (const u32 *)buffer,
				  len_rounded / sizeof(u32));

	*num_dumped_bytes = len_rounded;

	return rc;
}
7698 
/* Collect a protection override dump into @buffer; bytes written returned
 * via @num_dumped_bytes.
 */
int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
				u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_PROTECTION_OVERRIDE,
			       num_dumped_bytes);
}
7705 
qed_dbg_protection_override_size(struct qed_dev * cdev)7706 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7707 {
7708 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7709 }
7710 
/* Collect a FW asserts dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
		       u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_FW_ASSERTS, num_dumped_bytes);
}
7717 
qed_dbg_fw_asserts_size(struct qed_dev * cdev)7718 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7719 {
7720 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7721 }
7722 
/* Collect an ILT dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT,
			       num_dumped_bytes);
}
7727 
qed_dbg_ilt_size(struct qed_dev * cdev)7728 int qed_dbg_ilt_size(struct qed_dev *cdev)
7729 {
7730 	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
7731 }
7732 
/* Collect an MCP trace dump into @buffer; bytes written returned via
 * @num_dumped_bytes.
 */
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
		      u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer,
			       DBG_FEATURE_MCP_TRACE, num_dumped_bytes);
}
7739 
qed_dbg_mcp_trace_size(struct qed_dev * cdev)7740 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7741 {
7742 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7743 }
7744 
7745 /* Defines the amount of bytes allocated for recording the length of debugfs
7746  * feature buffer.
7747  */
7748 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7749 #define REGDUMP_HEADER_SIZE_SHIFT		0
7750 #define REGDUMP_HEADER_SIZE_MASK		0xffffff
7751 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7752 #define REGDUMP_HEADER_FEATURE_MASK		0x1f
7753 #define REGDUMP_HEADER_BIN_DUMP_SHIFT		29
7754 #define REGDUMP_HEADER_BIN_DUMP_MASK		0x1
7755 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7756 #define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
7757 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7758 #define REGDUMP_HEADER_ENGINE_MASK		0x1
7759 #define REGDUMP_MAX_SIZE			0x1000000
7760 #define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
7761 
/* Feature ids encoded into the REGDUMP_HEADER_FEATURE field of each
 * regdump header word; values are part of the dump format and must not
 * be renumbered.
 */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
	NVM_CFG1 = 9,
	DEFAULT_CFG = 10,
	NVM_META = 11,
	MDUMP = 12,
	ILT_DUMP = 13,
};
7778 
/* Build the 32-bit regdump header word that precedes each feature blob:
 * bits 0-23 size, bits 24-28 feature id, bit 29 bin-dump, bit 30
 * omit-engine, bit 31 engine.
 */
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
				   enum debug_print_features feature,
				   int engine, u32 feature_size, u8 omit_engine)
{
	u32 hdr = 0;

	SET_FIELD(hdr, REGDUMP_HEADER_SIZE, feature_size);

	/* The size field is only 24 bits wide; warn when it overflows */
	if (hdr != feature_size)
		DP_NOTICE(cdev,
			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
			  feature, feature_size);

	SET_FIELD(hdr, REGDUMP_HEADER_FEATURE, feature);
	SET_FIELD(hdr, REGDUMP_HEADER_BIN_DUMP, 1);
	SET_FIELD(hdr, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
	SET_FIELD(hdr, REGDUMP_HEADER_ENGINE, engine);

	return hdr;
}
7798 
/* Collect all debug features into a single buffer.
 *
 * Per-engine features (idle_chk x2, reg_fifo, igu_fifo, protection
 * override, fw_asserts, ilt, grc) are collected for each hwfn; common
 * features (mcp_trace and the NVM images) are collected once. Each feature
 * blob is preceded by a 32-bit header built by qed_calc_regdump_header().
 * Individual feature failures are logged and skipped rather than aborting
 * the whole dump.
 *
 * The caller must supply a buffer of at least qed_dbg_all_data_size()
 * bytes. Always returns 0.
 */
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], i;
	u32 offset = 0, feature_size;
	int rc;

	/* Save the GRC params; the dumps below may modify them and they are
	 * restored before the final GRC dump.
	 */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];

	/* On a single-engine device the engine bit carries no information */
	if (!QED_IS_CMT(cdev))
		omit_engine = 1;

	mutex_lock(&qed_dbg_lock);
	cdev->dbg_bin_dump = true;

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, FW_ASSERTS,
						    cur_engine, feature_size,
						    omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* ILT dump is skipped when disabled or too large to fit */
		feature_size = qed_dbg_ilt_size(cdev);
		if (!cdev->disable_ilt_dump &&
		    feature_size < ILT_DUMP_MAX_SIZE) {
			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
					 REGDUMP_HEADER_SIZE, &feature_size);
			if (!rc) {
				*(u32 *)((u8 *)buffer + offset) =
				    qed_calc_regdump_header(cdev, ILT_DUMP,
							    cur_engine,
							    feature_size,
							    omit_engine);
				offset += feature_size + REGDUMP_HEADER_SIZE;
			} else {
				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
				       rc);
			}
		}

		/* GRC dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];

		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, GRC_DUMP,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* mcp_trace */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	/* Re-populate nvm attribute info */
	qed_mcp_nvm_info_free(p_hwfn);
	qed_mcp_nvm_info_populate(p_hwfn);

	/* nvm cfg1 */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_CFG1);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		/* -ENOENT just means the image is absent; not an error */
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
	}

	/* nvm default */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
		       rc);
	}

	/* nvm meta */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_NVM_META);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
			qed_calc_regdump_header(cdev, NVM_META, cur_engine,
						feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
	}

	/* nvm mdump */
	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_MDUMP);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
			qed_calc_regdump_header(cdev, MDUMP, cur_engine,
						feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
	}

	cdev->dbg_bin_dump = false;
	mutex_unlock(&qed_dbg_lock);

	return 0;
}
8025 
/* Compute the worst-case buffer size (bytes) needed by qed_dbg_all_data().
 *
 * Sums per-engine feature sizes (each prefixed by a regdump header), the
 * common mcp_trace size, and the lengths of the NVM images that exist.
 * When the total exceeds REGDUMP_MAX_SIZE the ILT dump is disabled (via
 * cdev->disable_ilt_dump, which qed_dbg_all_data() also honours) and its
 * contribution is removed from the returned size.
 */
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
	u8 cur_engine, org_engine;

	cdev->disable_ilt_dump = false;
	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		/* idle_chk is counted twice - it is dumped twice per engine */
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE +
			    qed_dbg_protection_override_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);

		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
		if (ilt_len < ILT_DUMP_MAX_SIZE) {
			total_ilt_len += ilt_len;
			regs_len += ilt_len;
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* Engine common */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;

	if (regs_len > REGDUMP_MAX_SIZE) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "Dump exceeds max size 0x%x, disable ILT dump\n",
			   REGDUMP_MAX_SIZE);
		cdev->disable_ilt_dump = true;
		regs_len -= total_ilt_len;
	}

	return regs_len;
}
8083 
/* Dump a single debug feature on the current debug engine and copy the
 * result into the caller-supplied @buffer.
 *
 * @buffer must be at least qed_dbg_feature_size() bytes. On failure
 * *num_dumped_bytes is set to 0 and -EINVAL is returned; on success the
 * number of dumped bytes is reported and 0 is returned.
 */
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_features[feature];
	enum dbg_status dbg_rc;
	struct qed_ptt *p_ptt;
	int rc = 0;

	/* Acquire ptt */
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EINVAL;

	/* Get dump */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
		rc = -EINVAL;
		goto out;
	}

	DP_VERBOSE(cdev, QED_MSG_DEBUG,
		   "copying debugfs feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
			    4;

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
8120 
qed_dbg_feature_size(struct qed_dev * cdev,enum qed_dbg_features feature)8121 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8122 {
8123 	struct qed_hwfn *p_hwfn =
8124 		&cdev->hwfns[cdev->engine_for_debug];
8125 	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8126 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8127 	u32 buf_size_dwords;
8128 	enum dbg_status rc;
8129 
8130 	if (!p_ptt)
8131 		return -EINVAL;
8132 
8133 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8134 						   &buf_size_dwords);
8135 	if (rc != DBG_STATUS_OK)
8136 		buf_size_dwords = 0;
8137 
8138 	/* Feature will not be dumped if it exceeds maximum size */
8139 	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8140 		buf_size_dwords = 0;
8141 
8142 	qed_ptt_release(p_hwfn, p_ptt);
8143 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8144 	return qed_feature->buf_size;
8145 }
8146 
/* Return the index of the hwfn currently used for debug data collection. */
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->engine_for_debug;
}
8151 
qed_set_debug_engine(struct qed_dev * cdev,int engine_number)8152 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8153 {
8154 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8155 		   engine_number);
8156 	cdev->engine_for_debug = engine_number;
8157 }
8158 
qed_dbg_pf_init(struct qed_dev * cdev)8159 void qed_dbg_pf_init(struct qed_dev *cdev)
8160 {
8161 	const u8 *dbg_values = NULL;
8162 	int i;
8163 
8164 	/* Debug values are after init values.
8165 	 * The offset is the first dword of the file.
8166 	 */
8167 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8168 
8169 	for_each_hwfn(cdev, i) {
8170 		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8171 		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8172 	}
8173 
8174 	/* Set the hwfn to be 0 as default */
8175 	cdev->engine_for_debug = 0;
8176 }
8177 
qed_dbg_pf_exit(struct qed_dev * cdev)8178 void qed_dbg_pf_exit(struct qed_dev *cdev)
8179 {
8180 	struct qed_dbg_feature *feature = NULL;
8181 	enum qed_dbg_features feature_idx;
8182 
8183 	/* debug features' buffers may be allocated if debug feature was used
8184 	 * but dump wasn't called
8185 	 */
8186 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8187 		feature = &cdev->dbg_features[feature_idx];
8188 		if (feature->dump_buf) {
8189 			vfree(feature->dump_buf);
8190 			feature->dump_buf = NULL;
8191 		}
8192 	}
8193 }
8194