1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17
18 /* Chip IDs enum */
19 enum chip_ids {
20 CHIP_RESERVED,
21 CHIP_BB_B0,
22 CHIP_K2,
23 MAX_CHIP_IDS
24 };
25
26 /* Memory groups enum */
27 enum mem_groups {
28 MEM_GROUP_PXP_MEM,
29 MEM_GROUP_DMAE_MEM,
30 MEM_GROUP_CM_MEM,
31 MEM_GROUP_QM_MEM,
32 MEM_GROUP_TM_MEM,
33 MEM_GROUP_BRB_RAM,
34 MEM_GROUP_BRB_MEM,
35 MEM_GROUP_PRS_MEM,
36 MEM_GROUP_SDM_MEM,
37 MEM_GROUP_PBUF,
38 MEM_GROUP_IOR,
39 MEM_GROUP_RAM,
40 MEM_GROUP_BTB_RAM,
41 MEM_GROUP_RDIF_CTX,
42 MEM_GROUP_TDIF_CTX,
43 MEM_GROUP_CONN_CFC_MEM,
44 MEM_GROUP_TASK_CFC_MEM,
45 MEM_GROUP_CAU_PI,
46 MEM_GROUP_CAU_MEM,
47 MEM_GROUP_PXP_ILT,
48 MEM_GROUP_MULD_MEM,
49 MEM_GROUP_BTB_MEM,
50 MEM_GROUP_IGU_MEM,
51 MEM_GROUP_IGU_MSIX,
52 MEM_GROUP_CAU_SB,
53 MEM_GROUP_BMB_RAM,
54 MEM_GROUP_BMB_MEM,
55 MEM_GROUPS_NUM
56 };
57
58 /* Memory groups names */
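/* Note: must be kept in the same order as the mem_groups enum above */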
59 static const char * const s_mem_group_names[] = {
60 "PXP_MEM",
61 "DMAE_MEM",
62 "CM_MEM",
63 "QM_MEM",
64 "TM_MEM",
65 "BRB_RAM",
66 "BRB_MEM",
67 "PRS_MEM",
68 "SDM_MEM",
69 "PBUF",
70 "IOR",
71 "RAM",
72 "BTB_RAM",
73 "RDIF_CTX",
74 "TDIF_CTX",
75 "CONN_CFC_MEM",
76 "TASK_CFC_MEM",
77 "CAU_PI",
78 "CAU_MEM",
79 "PXP_ILT",
80 "MULD_MEM",
81 "BTB_MEM",
82 "IGU_MEM",
83 "IGU_MSIX",
84 "CAU_SB",
85 "BMB_RAM",
86 "BMB_MEM",
87 };
88
89 /* Idle check conditions */
90 static u32 cond4(const u32 *r, const u32 *imm)
91 {
92 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
93 }
94
95 static u32 cond6(const u32 *r, const u32 *imm)
96 {
97 return ((r[0] >> imm[0]) & imm[1]) != imm[2];
98 }
99
100 static u32 cond5(const u32 *r, const u32 *imm)
101 {
102 return (r[0] & imm[0]) != imm[1];
103 }
104
105 static u32 cond8(const u32 *r, const u32 *imm)
106 {
107 return ((r[0] & imm[0]) >> imm[1]) !=
108 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
109 }
110
111 static u32 cond9(const u32 *r, const u32 *imm)
112 {
113 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
114 }
115
116 static u32 cond1(const u32 *r, const u32 *imm)
117 {
118 return (r[0] & ~imm[0]) != imm[1];
119 }
120
121 static u32 cond0(const u32 *r, const u32 *imm)
122 {
123 return r[0] != imm[0];
124 }
125
126 static u32 cond10(const u32 *r, const u32 *imm)
127 {
128 return r[0] != r[1] && r[2] == imm[0];
129 }
130
131 static u32 cond11(const u32 *r, const u32 *imm)
132 {
133 return r[0] != r[1] && r[2] > imm[0];
134 }
135
136 static u32 cond3(const u32 *r, const u32 *imm)
137 {
138 return r[0] != r[1];
139 }
140
141 static u32 cond12(const u32 *r, const u32 *imm)
142 {
143 return r[0] & imm[0];
144 }
145
146 static u32 cond7(const u32 *r, const u32 *imm)
147 {
148 return r[0] < (r[1] - imm[0]);
149 }
150
151 static u32 cond2(const u32 *r, const u32 *imm)
152 {
153 return r[0] > imm[0];
154 }
155
156 /* Array of Idle Check conditions */
157 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
158 cond0,
159 cond1,
160 cond2,
161 cond3,
162 cond4,
163 cond5,
164 cond6,
165 cond7,
166 cond8,
167 cond9,
168 cond10,
169 cond11,
170 cond12,
171 };
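
/* Illustrative sketch (not driver code): an idle-check rule carries a
 * condition index plus arrays of register values and immediate values, and
 * is evaluated roughly as
 *
 *	failed = cond_arr[rule_cond_id](reg_values, imm_values);
 *
 * where rule_cond_id, reg_values and imm_values are hypothetical names.
 */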
172
173 /******************************* Data Types **********************************/
174
175 enum platform_ids {
176 PLATFORM_ASIC,
177 PLATFORM_RESERVED,
178 PLATFORM_RESERVED2,
179 PLATFORM_RESERVED3,
180 MAX_PLATFORM_IDS
181 };
182
183 struct dbg_array {
184 const u32 *ptr;
185 u32 size_in_dwords;
186 };
187
188 /* Chip constant definitions */
189 struct chip_defs {
190 const char *name;
191 struct {
192 u8 num_ports;
193 u8 num_pfs;
194 } per_platform[MAX_PLATFORM_IDS];
195 };
196
197 /* Platform constant definitions */
198 struct platform_defs {
199 const char *name;
200 u32 delay_factor;
201 };
202
203 /* Storm constant definitions */
204 struct storm_defs {
205 char letter;
206 enum block_id block_id;
207 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
208 bool has_vfc;
209 u32 sem_fast_mem_addr;
210 u32 sem_frame_mode_addr;
211 u32 sem_slow_enable_addr;
212 u32 sem_slow_mode_addr;
213 u32 sem_slow_mode1_conf_addr;
214 u32 sem_sync_dbg_empty_addr;
215 u32 sem_slow_dbg_empty_addr;
216 u32 cm_ctx_wr_addr;
217 u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
218 u32 cm_conn_ag_ctx_rd_addr;
219 u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
220 u32 cm_conn_st_ctx_rd_addr;
221 u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
222 u32 cm_task_ag_ctx_rd_addr;
223 u32 cm_task_st_ctx_lid_size; /* In quad-regs */
224 u32 cm_task_st_ctx_rd_addr;
225 };
226
227 /* Block constant definitions */
228 struct block_defs {
229 const char *name;
230 bool has_dbg_bus[MAX_CHIP_IDS];
231 bool associated_to_storm;
232 u32 storm_id; /* Valid only if associated_to_storm is true */
233 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
234 u32 dbg_select_addr;
235 u32 dbg_cycle_enable_addr;
236 u32 dbg_shift_addr;
237 u32 dbg_force_valid_addr;
238 u32 dbg_force_frame_addr;
239 bool has_reset_bit;
240 bool unreset; /* If true, the block is taken out of reset before dump */
241 enum dbg_reset_regs reset_reg;
242 u8 reset_bit_offset; /* Bit offset in reset register */
243 };
244
245 /* Reset register definitions */
246 struct reset_reg_defs {
247 u32 addr;
248 u32 unreset_val;
249 bool exists[MAX_CHIP_IDS];
250 };
251
252 struct grc_param_defs {
253 u32 default_val[MAX_CHIP_IDS];
254 u32 min;
255 u32 max;
256 bool is_preset;
257 u32 exclude_all_preset_val;
258 u32 crash_preset_val;
259 };
260
261 struct rss_mem_defs {
262 const char *mem_name;
263 const char *type_name;
264 u32 addr; /* In 128b units */
265 u32 num_entries[MAX_CHIP_IDS];
266 u32 entry_width[MAX_CHIP_IDS]; /* In bits */
267 };
268
269 struct vfc_ram_defs {
270 const char *mem_name;
271 const char *type_name;
272 u32 base_row;
273 u32 num_rows;
274 };
275
276 struct big_ram_defs {
277 const char *instance_name;
278 enum mem_groups mem_group_id;
279 enum mem_groups ram_mem_group_id;
280 enum dbg_grc_params grc_param;
281 u32 addr_reg_addr;
282 u32 data_reg_addr;
283 u32 num_of_blocks[MAX_CHIP_IDS];
284 };
285
286 struct phy_defs {
287 const char *phy_name;
288 u32 base_addr;
289 u32 tbus_addr_lo_addr;
290 u32 tbus_addr_hi_addr;
291 u32 tbus_data_lo_addr;
292 u32 tbus_data_hi_addr;
293 };
294
295 /******************************** Constants **********************************/
296
297 #define MAX_LCIDS 320
298 #define MAX_LTIDS 320
299 #define NUM_IOR_SETS 2
300 #define IORS_PER_SET 176
301 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
302 #define BYTES_IN_DWORD sizeof(u32)
303
304 /* In the macros below, size and offset are specified in bits */
305 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
306 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
307 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
308 #define FIELD_DWORD_OFFSET(type, field) \
309 (int)(FIELD_BIT_OFFSET(type, field) / 32)
310 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
311 #define FIELD_BIT_MASK(type, field) \
312 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
313 FIELD_DWORD_SHIFT(type, field))
314 #define SET_VAR_FIELD(var, type, field, val) \
315 do { \
316 var[FIELD_DWORD_OFFSET(type, field)] &= \
317 (~FIELD_BIT_MASK(type, field)); \
318 var[FIELD_DWORD_OFFSET(type, field)] |= \
319 (val) << FIELD_DWORD_SHIFT(type, field); \
320 } while (0)
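/* Illustrative example: for the VFC_CAM_CMD ROW field defined below
 * (bit offset 48, size 9), FIELD_DWORD_OFFSET is 1, FIELD_DWORD_SHIFT is 16
 * and FIELD_BIT_MASK is 0x01ff0000, so
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * clears bits 16-24 of cam_cmd[1] and writes (row) << 16 into them.
 * cam_cmd and row are hypothetical names used only for this example.
 */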
321 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
322 do { \
323 for (i = 0; i < (arr_size); i++) \
324 qed_wr(dev, ptt, addr, (arr)[i]); \
325 } while (0)
326 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
327 do { \
328 for (i = 0; i < (arr_size); i++) \
329 (arr)[i] = qed_rd(dev, ptt, addr); \
330 } while (0)
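/* Note: ARR_REG_WR and ARR_REG_RD rely on a loop counter named 'i' that must
 * be declared in the calling function. Illustrative use, assuming a local
 * u32 i and a hypothetical buf/addr pair:
 *
 *	ARR_REG_RD(p_hwfn, p_ptt, addr, buf, len_in_dwords);
 */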
331
332 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
333 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
334 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
335 #define RAM_LINES_TO_BYTES(lines) \
336 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
337 #define REG_DUMP_LEN_SHIFT 24
338 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
339 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
340 #define IDLE_CHK_RULE_SIZE_DWORDS \
341 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
342 #define IDLE_CHK_RESULT_HDR_DWORDS \
343 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
344 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
345 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
346 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
347
348 /* The sizes and offsets below are specified in bits */
349 #define VFC_CAM_CMD_STRUCT_SIZE 64
350 #define VFC_CAM_CMD_ROW_OFFSET 48
351 #define VFC_CAM_CMD_ROW_SIZE 9
352 #define VFC_CAM_ADDR_STRUCT_SIZE 16
353 #define VFC_CAM_ADDR_OP_OFFSET 0
354 #define VFC_CAM_ADDR_OP_SIZE 4
355 #define VFC_CAM_RESP_STRUCT_SIZE 256
356 #define VFC_RAM_ADDR_STRUCT_SIZE 16
357 #define VFC_RAM_ADDR_OP_OFFSET 0
358 #define VFC_RAM_ADDR_OP_SIZE 2
359 #define VFC_RAM_ADDR_ROW_OFFSET 2
360 #define VFC_RAM_ADDR_ROW_SIZE 10
361 #define VFC_RAM_RESP_STRUCT_SIZE 256
362 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
363 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
364 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
365 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
366 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
367 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
368 #define NUM_VFC_RAM_TYPES 4
369 #define VFC_CAM_NUM_ROWS 512
370 #define VFC_OPCODE_CAM_RD 14
371 #define VFC_OPCODE_RAM_RD 0
372 #define NUM_RSS_MEM_TYPES 5
373 #define NUM_BIG_RAM_TYPES 3
374 #define BIG_RAM_BLOCK_SIZE_BYTES 128
375 #define BIG_RAM_BLOCK_SIZE_DWORDS \
376 BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
377 #define NUM_PHY_TBUS_ADDRESSES 2048
378 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
379 #define RESET_REG_UNRESET_OFFSET 4
380 #define STALL_DELAY_MS 500
381 #define STATIC_DEBUG_LINE_DWORDS 9
382 #define NUM_DBG_BUS_LINES 256
383 #define NUM_COMMON_GLOBAL_PARAMS 8
384 #define FW_IMG_MAIN 1
385 #define REG_FIFO_DEPTH_ELEMENTS 32
386 #define REG_FIFO_ELEMENT_DWORDS 2
387 #define REG_FIFO_DEPTH_DWORDS \
388 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
389 #define IGU_FIFO_DEPTH_ELEMENTS 64
390 #define IGU_FIFO_ELEMENT_DWORDS 4
391 #define IGU_FIFO_DEPTH_DWORDS \
392 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
393 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
394 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
395 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
396 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
397 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
398 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
399 (MCP_REG_SCRATCH + \
400 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
401 #define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
402 #define EMPTY_FW_VERSION_STR "???_???_???_???"
403 #define EMPTY_FW_IMAGE_STR "???????????????"
404
405 /***************************** Constant Arrays *******************************/
406
407 /* Debug arrays */
408 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
409
410 /* Chip constant definitions array */
411 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
412 { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
413 { "bb_b0",
414 { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
415 { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
416 };
417
418 /* Storm constant definitions array */
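/* Each entry follows the field order of struct storm_defs: the trailing
 * number/register pairs are the CM connection/task AG and ST context lid
 * sizes (in quad-regs) and the registers they are read from.
 */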
419 static struct storm_defs s_storm_defs[] = {
420 /* Tstorm */
421 {'T', BLOCK_TSEM,
422 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
423 DBG_BUS_CLIENT_RBCT}, true,
424 TSEM_REG_FAST_MEMORY,
425 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
426 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
427 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
428 TCM_REG_CTX_RBC_ACCS,
429 4, TCM_REG_AGG_CON_CTX,
430 16, TCM_REG_SM_CON_CTX,
431 2, TCM_REG_AGG_TASK_CTX,
432 4, TCM_REG_SM_TASK_CTX},
433 /* Mstorm */
434 {'M', BLOCK_MSEM,
435 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
436 DBG_BUS_CLIENT_RBCM}, false,
437 MSEM_REG_FAST_MEMORY,
438 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
439 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
440 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
441 MCM_REG_CTX_RBC_ACCS,
442 1, MCM_REG_AGG_CON_CTX,
443 10, MCM_REG_SM_CON_CTX,
444 2, MCM_REG_AGG_TASK_CTX,
445 7, MCM_REG_SM_TASK_CTX},
446 /* Ustorm */
447 {'U', BLOCK_USEM,
448 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
449 DBG_BUS_CLIENT_RBCU}, false,
450 USEM_REG_FAST_MEMORY,
451 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
452 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
453 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
454 UCM_REG_CTX_RBC_ACCS,
455 2, UCM_REG_AGG_CON_CTX,
456 13, UCM_REG_SM_CON_CTX,
457 3, UCM_REG_AGG_TASK_CTX,
458 3, UCM_REG_SM_TASK_CTX},
459 /* Xstorm */
460 {'X', BLOCK_XSEM,
461 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
462 DBG_BUS_CLIENT_RBCX}, false,
463 XSEM_REG_FAST_MEMORY,
464 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
465 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
466 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
467 XCM_REG_CTX_RBC_ACCS,
468 9, XCM_REG_AGG_CON_CTX,
469 15, XCM_REG_SM_CON_CTX,
470 0, 0,
471 0, 0},
472 /* Ystorm */
473 {'Y', BLOCK_YSEM,
474 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
475 DBG_BUS_CLIENT_RBCY}, false,
476 YSEM_REG_FAST_MEMORY,
477 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
478 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
479 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
480 YCM_REG_CTX_RBC_ACCS,
481 2, YCM_REG_AGG_CON_CTX,
482 3, YCM_REG_SM_CON_CTX,
483 2, YCM_REG_AGG_TASK_CTX,
484 12, YCM_REG_SM_TASK_CTX},
485 /* Pstorm */
486 {'P', BLOCK_PSEM,
487 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
488 DBG_BUS_CLIENT_RBCS}, true,
489 PSEM_REG_FAST_MEMORY,
490 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
491 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
492 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
493 PCM_REG_CTX_RBC_ACCS,
494 0, 0,
495 10, PCM_REG_SM_CON_CTX,
496 0, 0,
497 0, 0}
498 };
499
500 /* Block definitions array */
501 static struct block_defs block_grc_defs = {
502 "grc", {true, true, true}, false, 0,
503 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
504 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
505 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
506 GRC_REG_DBG_FORCE_FRAME,
507 true, false, DBG_RESET_REG_MISC_PL_UA, 1
508 };
509
510 static struct block_defs block_miscs_defs = {
511 "miscs", {false, false, false}, false, 0,
512 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
513 0, 0, 0, 0, 0,
514 false, false, MAX_DBG_RESET_REGS, 0
515 };
516
517 static struct block_defs block_misc_defs = {
518 "misc", {false, false, false}, false, 0,
519 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
520 0, 0, 0, 0, 0,
521 false, false, MAX_DBG_RESET_REGS, 0
522 };
523
524 static struct block_defs block_dbu_defs = {
525 "dbu", {false, false, false}, false, 0,
526 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
527 0, 0, 0, 0, 0,
528 false, false, MAX_DBG_RESET_REGS, 0
529 };
530
531 static struct block_defs block_pglue_b_defs = {
532 "pglue_b", {true, true, true}, false, 0,
533 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
534 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
535 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
536 PGLUE_B_REG_DBG_FORCE_FRAME,
537 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
538 };
539
540 static struct block_defs block_cnig_defs = {
541 "cnig", {false, false, true}, false, 0,
542 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
543 CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
544 CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
545 CNIG_REG_DBG_FORCE_FRAME_K2,
546 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
547 };
548
549 static struct block_defs block_cpmu_defs = {
550 "cpmu", {false, false, false}, false, 0,
551 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
552 0, 0, 0, 0, 0,
553 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
554 };
555
556 static struct block_defs block_ncsi_defs = {
557 "ncsi", {true, true, true}, false, 0,
558 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
559 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
560 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
561 NCSI_REG_DBG_FORCE_FRAME,
562 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
563 };
564
565 static struct block_defs block_opte_defs = {
566 "opte", {false, false, false}, false, 0,
567 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
568 0, 0, 0, 0, 0,
569 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
570 };
571
572 static struct block_defs block_bmb_defs = {
573 "bmb", {true, true, true}, false, 0,
574 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
575 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
576 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
577 BMB_REG_DBG_FORCE_FRAME,
578 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
579 };
580
581 static struct block_defs block_pcie_defs = {
582 "pcie", {false, false, true}, false, 0,
583 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
584 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
585 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
586 PCIE_REG_DBG_COMMON_FORCE_FRAME,
587 false, false, MAX_DBG_RESET_REGS, 0
588 };
589
590 static struct block_defs block_mcp_defs = {
591 "mcp", {false, false, false}, false, 0,
592 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
593 0, 0, 0, 0, 0,
594 false, false, MAX_DBG_RESET_REGS, 0
595 };
596
597 static struct block_defs block_mcp2_defs = {
598 "mcp2", {true, true, true}, false, 0,
599 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
600 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
601 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
602 MCP2_REG_DBG_FORCE_FRAME,
603 false, false, MAX_DBG_RESET_REGS, 0
604 };
605
606 static struct block_defs block_pswhst_defs = {
607 "pswhst", {true, true, true}, false, 0,
608 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
609 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
610 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
611 PSWHST_REG_DBG_FORCE_FRAME,
612 true, false, DBG_RESET_REG_MISC_PL_HV, 0
613 };
614
615 static struct block_defs block_pswhst2_defs = {
616 "pswhst2", {true, true, true}, false, 0,
617 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
618 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
619 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
620 PSWHST2_REG_DBG_FORCE_FRAME,
621 true, false, DBG_RESET_REG_MISC_PL_HV, 0
622 };
623
624 static struct block_defs block_pswrd_defs = {
625 "pswrd", {true, true, true}, false, 0,
626 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
627 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
628 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
629 PSWRD_REG_DBG_FORCE_FRAME,
630 true, false, DBG_RESET_REG_MISC_PL_HV, 2
631 };
632
633 static struct block_defs block_pswrd2_defs = {
634 "pswrd2", {true, true, true}, false, 0,
635 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
636 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
637 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
638 PSWRD2_REG_DBG_FORCE_FRAME,
639 true, false, DBG_RESET_REG_MISC_PL_HV, 2
640 };
641
642 static struct block_defs block_pswwr_defs = {
643 "pswwr", {true, true, true}, false, 0,
644 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
645 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
646 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
647 PSWWR_REG_DBG_FORCE_FRAME,
648 true, false, DBG_RESET_REG_MISC_PL_HV, 3
649 };
650
651 static struct block_defs block_pswwr2_defs = {
652 "pswwr2", {false, false, false}, false, 0,
653 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
654 0, 0, 0, 0, 0,
655 true, false, DBG_RESET_REG_MISC_PL_HV, 3
656 };
657
658 static struct block_defs block_pswrq_defs = {
659 "pswrq", {true, true, true}, false, 0,
660 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
661 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
662 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
663 PSWRQ_REG_DBG_FORCE_FRAME,
664 true, false, DBG_RESET_REG_MISC_PL_HV, 1
665 };
666
667 static struct block_defs block_pswrq2_defs = {
668 "pswrq2", {true, true, true}, false, 0,
669 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
670 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
671 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
672 PSWRQ2_REG_DBG_FORCE_FRAME,
673 true, false, DBG_RESET_REG_MISC_PL_HV, 1
674 };
675
676 static struct block_defs block_pglcs_defs = {
677 "pglcs", {false, false, true}, false, 0,
678 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
679 PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
680 PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
681 PGLCS_REG_DBG_FORCE_FRAME,
682 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
683 };
684
685 static struct block_defs block_ptu_defs = {
686 "ptu", {true, true, true}, false, 0,
687 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
688 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
689 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
690 PTU_REG_DBG_FORCE_FRAME,
691 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
692 };
693
694 static struct block_defs block_dmae_defs = {
695 "dmae", {true, true, true}, false, 0,
696 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
697 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
698 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
699 DMAE_REG_DBG_FORCE_FRAME,
700 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
701 };
702
703 static struct block_defs block_tcm_defs = {
704 "tcm", {true, true, true}, true, DBG_TSTORM_ID,
705 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
706 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
707 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
708 TCM_REG_DBG_FORCE_FRAME,
709 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
710 };
711
712 static struct block_defs block_mcm_defs = {
713 "mcm", {true, true, true}, true, DBG_MSTORM_ID,
714 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
715 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
716 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
717 MCM_REG_DBG_FORCE_FRAME,
718 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
719 };
720
721 static struct block_defs block_ucm_defs = {
722 "ucm", {true, true, true}, true, DBG_USTORM_ID,
723 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
724 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
725 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
726 UCM_REG_DBG_FORCE_FRAME,
727 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
728 };
729
730 static struct block_defs block_xcm_defs = {
731 "xcm", {true, true, true}, true, DBG_XSTORM_ID,
732 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
733 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
734 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
735 XCM_REG_DBG_FORCE_FRAME,
736 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
737 };
738
739 static struct block_defs block_ycm_defs = {
740 "ycm", {true, true, true}, true, DBG_YSTORM_ID,
741 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
742 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
743 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
744 YCM_REG_DBG_FORCE_FRAME,
745 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
746 };
747
748 static struct block_defs block_pcm_defs = {
749 "pcm", {true, true, true}, true, DBG_PSTORM_ID,
750 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
751 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
752 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
753 PCM_REG_DBG_FORCE_FRAME,
754 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
755 };
756
757 static struct block_defs block_qm_defs = {
758 "qm", {true, true, true}, false, 0,
759 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
760 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
761 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
762 QM_REG_DBG_FORCE_FRAME,
763 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
764 };
765
766 static struct block_defs block_tm_defs = {
767 "tm", {true, true, true}, false, 0,
768 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
769 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
770 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
771 TM_REG_DBG_FORCE_FRAME,
772 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
773 };
774
775 static struct block_defs block_dorq_defs = {
776 "dorq", {true, true, true}, false, 0,
777 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
778 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
779 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
780 DORQ_REG_DBG_FORCE_FRAME,
781 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
782 };
783
784 static struct block_defs block_brb_defs = {
785 "brb", {true, true, true}, false, 0,
786 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
787 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
788 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
789 BRB_REG_DBG_FORCE_FRAME,
790 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
791 };
792
793 static struct block_defs block_src_defs = {
794 "src", {true, true, true}, false, 0,
795 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
796 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
797 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
798 SRC_REG_DBG_FORCE_FRAME,
799 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
800 };
801
802 static struct block_defs block_prs_defs = {
803 "prs", {true, true, true}, false, 0,
804 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
805 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
806 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
807 PRS_REG_DBG_FORCE_FRAME,
808 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
809 };
810
811 static struct block_defs block_tsdm_defs = {
812 "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
813 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
814 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
815 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
816 TSDM_REG_DBG_FORCE_FRAME,
817 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
818 };
819
820 static struct block_defs block_msdm_defs = {
821 "msdm", {true, true, true}, true, DBG_MSTORM_ID,
822 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
823 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
824 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
825 MSDM_REG_DBG_FORCE_FRAME,
826 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
827 };
828
829 static struct block_defs block_usdm_defs = {
830 "usdm", {true, true, true}, true, DBG_USTORM_ID,
831 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
832 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
833 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
834 USDM_REG_DBG_FORCE_FRAME,
835 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
836 };
837
838 static struct block_defs block_xsdm_defs = {
839 "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
840 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
841 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
842 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
843 XSDM_REG_DBG_FORCE_FRAME,
844 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
845 };
846
847 static struct block_defs block_ysdm_defs = {
848 "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
849 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
850 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
851 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
852 YSDM_REG_DBG_FORCE_FRAME,
853 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
854 };
855
856 static struct block_defs block_psdm_defs = {
857 "psdm", {true, true, true}, true, DBG_PSTORM_ID,
858 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
859 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
860 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
861 PSDM_REG_DBG_FORCE_FRAME,
862 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
863 };
864
865 static struct block_defs block_tsem_defs = {
866 "tsem", {true, true, true}, true, DBG_TSTORM_ID,
867 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
868 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
869 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
870 TSEM_REG_DBG_FORCE_FRAME,
871 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
872 };
873
874 static struct block_defs block_msem_defs = {
875 "msem", {true, true, true}, true, DBG_MSTORM_ID,
876 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
877 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
878 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
879 MSEM_REG_DBG_FORCE_FRAME,
880 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
881 };
882
883 static struct block_defs block_usem_defs = {
884 "usem", {true, true, true}, true, DBG_USTORM_ID,
885 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
886 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
887 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
888 USEM_REG_DBG_FORCE_FRAME,
889 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
890 };
891
892 static struct block_defs block_xsem_defs = {
893 "xsem", {true, true, true}, true, DBG_XSTORM_ID,
894 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
895 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
896 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
897 XSEM_REG_DBG_FORCE_FRAME,
898 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
899 };
900
901 static struct block_defs block_ysem_defs = {
902 "ysem", {true, true, true}, true, DBG_YSTORM_ID,
903 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
904 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
905 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
906 YSEM_REG_DBG_FORCE_FRAME,
907 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
908 };
909
910 static struct block_defs block_psem_defs = {
911 "psem", {true, true, true}, true, DBG_PSTORM_ID,
912 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
913 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
914 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
915 PSEM_REG_DBG_FORCE_FRAME,
916 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
917 };
918
919 static struct block_defs block_rss_defs = {
920 "rss", {true, true, true}, false, 0,
921 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
922 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
923 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
924 RSS_REG_DBG_FORCE_FRAME,
925 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
926 };
927
928 static struct block_defs block_tmld_defs = {
929 "tmld", {true, true, true}, false, 0,
930 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
931 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
932 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
933 TMLD_REG_DBG_FORCE_FRAME,
934 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
935 };
936
937 static struct block_defs block_muld_defs = {
938 "muld", {true, true, true}, false, 0,
939 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
940 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
941 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
942 MULD_REG_DBG_FORCE_FRAME,
943 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
944 };
945
946 static struct block_defs block_yuld_defs = {
947 "yuld", {true, true, true}, false, 0,
948 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
949 YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
950 YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
951 YULD_REG_DBG_FORCE_FRAME,
952 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
953 };
954
955 static struct block_defs block_xyld_defs = {
956 "xyld", {true, true, true}, false, 0,
957 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
958 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
959 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
960 XYLD_REG_DBG_FORCE_FRAME,
961 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
962 };
963
964 static struct block_defs block_prm_defs = {
965 "prm", {true, true, true}, false, 0,
966 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
967 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
968 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
969 PRM_REG_DBG_FORCE_FRAME,
970 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
971 };
972
973 static struct block_defs block_pbf_pb1_defs = {
974 "pbf_pb1", {true, true, true}, false, 0,
975 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
976 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
977 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
978 PBF_PB1_REG_DBG_FORCE_FRAME,
979 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
980 11
981 };
982
983 static struct block_defs block_pbf_pb2_defs = {
984 "pbf_pb2", {true, true, true}, false, 0,
985 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
986 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
987 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
988 PBF_PB2_REG_DBG_FORCE_FRAME,
989 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
990 12
991 };
992
993 static struct block_defs block_rpb_defs = {
994 "rpb", {true, true, true}, false, 0,
995 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
996 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
997 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
998 RPB_REG_DBG_FORCE_FRAME,
999 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1000 };
1001
1002 static struct block_defs block_btb_defs = {
1003 "btb", {true, true, true}, false, 0,
1004 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1005 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1006 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1007 BTB_REG_DBG_FORCE_FRAME,
1008 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1009 };
1010
1011 static struct block_defs block_pbf_defs = {
1012 "pbf", {true, true, true}, false, 0,
1013 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1014 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1015 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1016 PBF_REG_DBG_FORCE_FRAME,
1017 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1018 };
1019
1020 static struct block_defs block_rdif_defs = {
1021 "rdif", {true, true, true}, false, 0,
1022 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1023 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1024 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1025 RDIF_REG_DBG_FORCE_FRAME,
1026 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1027 };
1028
1029 static struct block_defs block_tdif_defs = {
1030 "tdif", {true, true, true}, false, 0,
1031 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1032 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1033 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1034 TDIF_REG_DBG_FORCE_FRAME,
1035 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1036 };
1037
1038 static struct block_defs block_cdu_defs = {
1039 "cdu", {true, true, true}, false, 0,
1040 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1041 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1042 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1043 CDU_REG_DBG_FORCE_FRAME,
1044 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1045 };
1046
1047 static struct block_defs block_ccfc_defs = {
1048 "ccfc", {true, true, true}, false, 0,
1049 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1050 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1051 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1052 CCFC_REG_DBG_FORCE_FRAME,
1053 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1054 };
1055
1056 static struct block_defs block_tcfc_defs = {
1057 "tcfc", {true, true, true}, false, 0,
1058 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1059 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1060 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1061 TCFC_REG_DBG_FORCE_FRAME,
1062 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1063 };
1064
1065 static struct block_defs block_igu_defs = {
1066 "igu", {true, true, true}, false, 0,
1067 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1068 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1069 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1070 IGU_REG_DBG_FORCE_FRAME,
1071 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1072 };
1073
1074 static struct block_defs block_cau_defs = {
1075 "cau", {true, true, true}, false, 0,
1076 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1077 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1078 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1079 CAU_REG_DBG_FORCE_FRAME,
1080 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1081 };
1082
1083 static struct block_defs block_umac_defs = {
1084 "umac", {false, false, true}, false, 0,
1085 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1086 UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
1087 UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
1088 UMAC_REG_DBG_FORCE_FRAME,
1089 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1090 };
1091
1092 static struct block_defs block_xmac_defs = {
1093 "xmac", {false, false, false}, false, 0,
1094 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1095 0, 0, 0, 0, 0,
1096 false, false, MAX_DBG_RESET_REGS, 0
1097 };
1098
1099 static struct block_defs block_dbg_defs = {
1100 "dbg", {false, false, false}, false, 0,
1101 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1102 0, 0, 0, 0, 0,
1103 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1104 };
1105
1106 static struct block_defs block_nig_defs = {
1107 "nig", {true, true, true}, false, 0,
1108 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1109 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1110 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1111 NIG_REG_DBG_FORCE_FRAME,
1112 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1113 };
1114
1115 static struct block_defs block_wol_defs = {
1116 "wol", {false, false, true}, false, 0,
1117 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1118 WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
1119 WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
1120 WOL_REG_DBG_FORCE_FRAME,
1121 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1122 };
1123
1124 static struct block_defs block_bmbn_defs = {
1125 "bmbn", {false, false, true}, false, 0,
1126 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1127 BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
1128 BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
1129 BMBN_REG_DBG_FORCE_FRAME,
1130 false, false, MAX_DBG_RESET_REGS, 0
1131 };
1132
1133 static struct block_defs block_ipc_defs = {
1134 "ipc", {false, false, false}, false, 0,
1135 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1136 0, 0, 0, 0, 0,
1137 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1138 };
1139
1140 static struct block_defs block_nwm_defs = {
1141 "nwm", {false, false, true}, false, 0,
1142 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1143 NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
1144 NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
1145 NWM_REG_DBG_FORCE_FRAME,
1146 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1147 };
1148
1149 static struct block_defs block_nws_defs = {
1150 "nws", {false, false, false}, false, 0,
1151 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1152 0, 0, 0, 0, 0,
1153 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1154 };
1155
1156 static struct block_defs block_ms_defs = {
1157 "ms", {false, false, false}, false, 0,
1158 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1159 0, 0, 0, 0, 0,
1160 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1161 };
1162
1163 static struct block_defs block_phy_pcie_defs = {
1164 "phy_pcie", {false, false, true}, false, 0,
1165 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1166 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
1167 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
1168 PCIE_REG_DBG_COMMON_FORCE_FRAME,
1169 false, false, MAX_DBG_RESET_REGS, 0
1170 };
1171
1172 static struct block_defs block_led_defs = {
1173 "led", {false, false, false}, false, 0,
1174 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1175 0, 0, 0, 0, 0,
1176 true, true, DBG_RESET_REG_MISCS_PL_HV, 14
1177 };
1178
1179 static struct block_defs block_misc_aeu_defs = {
1180 "misc_aeu", {false, false, false}, false, 0,
1181 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1182 0, 0, 0, 0, 0,
1183 false, false, MAX_DBG_RESET_REGS, 0
1184 };
1185
1186 static struct block_defs block_bar0_map_defs = {
1187 "bar0_map", {false, false, false}, false, 0,
1188 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1189 0, 0, 0, 0, 0,
1190 false, false, MAX_DBG_RESET_REGS, 0
1191 };
1192
1193 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1194 &block_grc_defs,
1195 &block_miscs_defs,
1196 &block_misc_defs,
1197 &block_dbu_defs,
1198 &block_pglue_b_defs,
1199 &block_cnig_defs,
1200 &block_cpmu_defs,
1201 &block_ncsi_defs,
1202 &block_opte_defs,
1203 &block_bmb_defs,
1204 &block_pcie_defs,
1205 &block_mcp_defs,
1206 &block_mcp2_defs,
1207 &block_pswhst_defs,
1208 &block_pswhst2_defs,
1209 &block_pswrd_defs,
1210 &block_pswrd2_defs,
1211 &block_pswwr_defs,
1212 &block_pswwr2_defs,
1213 &block_pswrq_defs,
1214 &block_pswrq2_defs,
1215 &block_pglcs_defs,
1216 &block_dmae_defs,
1217 &block_ptu_defs,
1218 &block_tcm_defs,
1219 &block_mcm_defs,
1220 &block_ucm_defs,
1221 &block_xcm_defs,
1222 &block_ycm_defs,
1223 &block_pcm_defs,
1224 &block_qm_defs,
1225 &block_tm_defs,
1226 &block_dorq_defs,
1227 &block_brb_defs,
1228 &block_src_defs,
1229 &block_prs_defs,
1230 &block_tsdm_defs,
1231 &block_msdm_defs,
1232 &block_usdm_defs,
1233 &block_xsdm_defs,
1234 &block_ysdm_defs,
1235 &block_psdm_defs,
1236 &block_tsem_defs,
1237 &block_msem_defs,
1238 &block_usem_defs,
1239 &block_xsem_defs,
1240 &block_ysem_defs,
1241 &block_psem_defs,
1242 &block_rss_defs,
1243 &block_tmld_defs,
1244 &block_muld_defs,
1245 &block_yuld_defs,
1246 &block_xyld_defs,
1247 &block_prm_defs,
1248 &block_pbf_pb1_defs,
1249 &block_pbf_pb2_defs,
1250 &block_rpb_defs,
1251 &block_btb_defs,
1252 &block_pbf_defs,
1253 &block_rdif_defs,
1254 &block_tdif_defs,
1255 &block_cdu_defs,
1256 &block_ccfc_defs,
1257 &block_tcfc_defs,
1258 &block_igu_defs,
1259 &block_cau_defs,
1260 &block_umac_defs,
1261 &block_xmac_defs,
1262 &block_dbg_defs,
1263 &block_nig_defs,
1264 &block_wol_defs,
1265 &block_bmbn_defs,
1266 &block_ipc_defs,
1267 &block_nwm_defs,
1268 &block_nws_defs,
1269 &block_ms_defs,
1270 &block_phy_pcie_defs,
1271 &block_led_defs,
1272 &block_misc_aeu_defs,
1273 &block_bar0_map_defs,
1274 };
1275
1276 static struct platform_defs s_platform_defs[] = {
1277 {"asic", 1},
1278 {"reserved", 0},
1279 {"reserved2", 0},
1280 {"reserved3", 0}
1281 };
1282
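/* GRC parameter definitions. Each entry maps to struct grc_param_defs:
 * per-chip default values, min, max, is_preset, exclude-all preset value
 * and crash preset value, in that order.
 */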
1283 static struct grc_param_defs s_grc_param_defs[] = {
1284 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
1285 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
1286 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
1287 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
1288 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
1289 {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
1290 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
1291 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
1292 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
1293 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
1294 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
1295 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
1296 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
1297 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
1298 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
1299 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
1300 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
1301 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
1302 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
1303 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
1304 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
1305 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
1306 {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
1307 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
1308 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
1309 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
1310 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
1311 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
1312 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
1313 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
1314 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
1315 {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
1316 {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1317 MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
1318 {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1319 MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
1320 {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
1321 {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
1322 {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
1323 {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
1324 {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
1325 };
1326
1327 static struct rss_mem_defs s_rss_mem_defs[] = {
1328 { "rss_mem_cid", "rss_cid", 0,
1329 {256, 256, 320},
1330 {32, 32, 32} },
1331 { "rss_mem_key_msb", "rss_key", 1024,
1332 {128, 128, 208},
1333 {256, 256, 256} },
1334 { "rss_mem_key_lsb", "rss_key", 2048,
1335 {128, 128, 208},
1336 {64, 64, 64} },
1337 { "rss_mem_info", "rss_info", 3072,
1338 {128, 128, 208},
1339 {16, 16, 16} },
1340 { "rss_mem_ind", "rss_ind", 4096,
1341 {(128 * 128), (128 * 128), (128 * 208)},
1342 {16, 16, 16} }
1343 };
1344
1345 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1346 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1347 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1348 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1349 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1350 };
1351
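/* Big RAM definitions. Each entry maps to struct big_ram_defs: the memory
 * group IDs, the GRC parameter that controls dumping it, the address/data
 * register pair used for indirect reads, and the per-chip number of blocks.
 */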
1352 static struct big_ram_defs s_big_ram_defs[] = {
1353 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1354 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1355 {4800, 4800, 5632} },
1356 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1357 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1358 {2880, 2880, 3680} },
1359 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1360 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1361 {1152, 1152, 1152} }
1362 };
1363
1364 static struct reset_reg_defs s_reset_regs_defs[] = {
1365 { MISCS_REG_RESET_PL_UA, 0x0,
1366 {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
1367 { MISCS_REG_RESET_PL_HV, 0x0,
1368 {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
1369 { MISCS_REG_RESET_PL_HV_2, 0x0,
1370 {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
1371 { MISC_REG_RESET_PL_UA, 0x0,
1372 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
1373 { MISC_REG_RESET_PL_HV, 0x0,
1374 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
1375 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1376 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1377 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1378 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1379 { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1380 {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1381 };
1382
1383 static struct phy_defs s_phy_defs[] = {
1384 {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
1385 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
1386 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
1387 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
1388 {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
1389 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
1390 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
1391 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
1392 {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1393 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1394 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1395 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1396 {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1397 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1398 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1399 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1400 };
1401
1402 /**************************** Private Functions ******************************/
1403
1404 /* Reads and returns a single dword from the specified unaligned buffer */
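/* memcpy is used so the read is safe even when buf is not dword-aligned */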
1405 static u32 qed_read_unaligned_dword(u8 *buf)
1406 {
1407 u32 dword;
1408
1409 memcpy((u8 *)&dword, buf, sizeof(dword));
1410 return dword;
1411 }
1412
1413 /* Initializes debug data for the specified device */
1414 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1415 struct qed_ptt *p_ptt)
1416 {
1417 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1418
1419 if (dev_data->initialized)
1420 return DBG_STATUS_OK;
1421
1422 if (QED_IS_K2(p_hwfn->cdev)) {
1423 dev_data->chip_id = CHIP_K2;
1424 dev_data->mode_enable[MODE_K2] = 1;
1425 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1426 dev_data->chip_id = CHIP_BB_B0;
1427 dev_data->mode_enable[MODE_BB_B0] = 1;
1428 } else {
1429 return DBG_STATUS_UNKNOWN_CHIP;
1430 }
1431
1432 dev_data->platform_id = PLATFORM_ASIC;
1433 dev_data->mode_enable[MODE_ASIC] = 1;
1434 dev_data->initialized = true;
1435 return DBG_STATUS_OK;
1436 }
1437
1438 /* Reads the FW info structure for the specified Storm from the chip,
1439 * and writes it to the specified fw_info pointer.
1440 */
1441 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1442 struct qed_ptt *p_ptt,
1443 u8 storm_id, struct fw_info *fw_info)
1444 {
1445 /* First, read the address that points to the fw_info location.
1446 * The address is located in the last line of the Storm RAM.
1447 */
1448 u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1449 SEM_FAST_REG_INT_RAM +
1450 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1451 sizeof(struct fw_info_location);
1452 struct fw_info_location fw_info_location;
1453 u32 *dest = (u32 *)&fw_info_location;
1454 u32 i;
1455
1456 memset(&fw_info_location, 0, sizeof(fw_info_location));
1457 memset(fw_info, 0, sizeof(*fw_info));
1458 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1459 i++, addr += BYTES_IN_DWORD)
1460 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1461 if (fw_info_location.size > 0 && fw_info_location.size <=
1462 sizeof(*fw_info)) {
1463 /* Read FW version info from Storm RAM */
1464 addr = fw_info_location.grc_addr;
1465 dest = (u32 *)fw_info;
1466 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1467 i++, addr += BYTES_IN_DWORD)
1468 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1469 }
1470 }
1471
1472 /* Dumps the specified string to the specified buffer. Returns the dumped size
1473 * in bytes (actual length + 1 for the terminating null character).
1474 */
1475 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1476 {
1477 if (dump)
1478 strcpy(dump_buf, str);
1479 return (u32)strlen(str) + 1;
1480 }
1481
1482 /* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
1483 * in bytes.
1484 */
1485 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1486 {
1487 u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
1488
1489 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1490
1491 if (dump && align_size)
1492 memset(dump_buf, 0, align_size);
1493 return align_size;
1494 }
1495
1496 /* Writes the specified string param to the specified buffer.
1497 * Returns the dumped size in dwords.
1498 */
1499 static u32 qed_dump_str_param(u32 *dump_buf,
1500 bool dump,
1501 const char *param_name, const char *param_val)
1502 {
1503 char *char_buf = (char *)dump_buf;
1504 u32 offset = 0;
1505
1506 /* Dump param name */
1507 offset += qed_dump_str(char_buf + offset, dump, param_name);
1508
1509 /* Indicate a string param value */
1510 if (dump)
1511 *(char_buf + offset) = 1;
1512 offset++;
1513
1514 /* Dump param value */
1515 offset += qed_dump_str(char_buf + offset, dump, param_val);
1516
1517 /* Align buffer to next dword */
1518 offset += qed_dump_align(char_buf + offset, dump, offset);
1519 return BYTES_TO_DWORDS(offset);
1520 }
1521
1522 /* Writes the specified numeric param to the specified buffer.
1523 * Returns the dumped size in dwords.
1524 */
1525 static u32 qed_dump_num_param(u32 *dump_buf,
1526 bool dump, const char *param_name, u32 param_val)
1527 {
1528 char *char_buf = (char *)dump_buf;
1529 u32 offset = 0;
1530
1531 /* Dump param name */
1532 offset += qed_dump_str(char_buf + offset, dump, param_name);
1533
1534 /* Indicate a numeric param value */
1535 if (dump)
1536 *(char_buf + offset) = 0;
1537 offset++;
1538
1539 /* Align buffer to next dword */
1540 offset += qed_dump_align(char_buf + offset, dump, offset);
1541
1542 /* Dump param value (and change offset from bytes to dwords) */
1543 offset = BYTES_TO_DWORDS(offset);
1544 if (dump)
1545 *(dump_buf + offset) = param_val;
1546 offset++;
1547 return offset;
1548 }
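
/* Resulting dump layout (sketch derived from the two helpers above):
 * a string param is dumped as "<name>\0", a 0x01 type byte, "<value>\0" and
 * zero padding to the next dword; a numeric param is dumped as "<name>\0",
 * a 0x00 type byte, zero padding to the next dword and the 32-bit value.
 */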
1549
1550 /* Reads the FW version and writes it as a param to the specified buffer.
1551 * Returns the dumped size in dwords.
1552 */
1553 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1554 struct qed_ptt *p_ptt,
1555 u32 *dump_buf, bool dump)
1556 {
1557 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1558 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1559 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1560 struct fw_info fw_info = { {0}, {0} };
1561 int printed_chars;
1562 u32 offset = 0;
1563
1564 if (dump) {
1565 /* Read FW image/version from PRAM in a non-reset SEMI */
1566 bool found = false;
1567 u8 storm_id;
1568
1569 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1570 storm_id++) {
1571 /* Read FW version/image */
1572 if (!dev_data->block_in_reset
1573 [s_storm_defs[storm_id].block_id]) {
1574 /* read FW info for the current Storm */
1575 qed_read_fw_info(p_hwfn,
1576 p_ptt, storm_id, &fw_info);
1577
1578 /* Create FW version/image strings */
1579 printed_chars =
1580 snprintf(fw_ver_str,
1581 sizeof(fw_ver_str),
1582 "%d_%d_%d_%d",
1583 fw_info.ver.num.major,
1584 fw_info.ver.num.minor,
1585 fw_info.ver.num.rev,
1586 fw_info.ver.num.eng);
1587 if (printed_chars < 0 || printed_chars >=
1588 sizeof(fw_ver_str))
1589 DP_NOTICE(p_hwfn,
1590 "Unexpected debug error: invalid FW version string\n");
1591 switch (fw_info.ver.image_id) {
1592 case FW_IMG_MAIN:
1593 strcpy(fw_img_str, "main");
1594 break;
1595 default:
1596 strcpy(fw_img_str, "unknown");
1597 break;
1598 }
1599
1600 found = true;
1601 }
1602 }
1603 }
1604
1605 /* Dump FW version, image and timestamp */
1606 offset += qed_dump_str_param(dump_buf + offset,
1607 dump, "fw-version", fw_ver_str);
1608 offset += qed_dump_str_param(dump_buf + offset,
1609 dump, "fw-image", fw_img_str);
1610 offset += qed_dump_num_param(dump_buf + offset,
1611 dump,
1612 "fw-timestamp", fw_info.ver.timestamp);
1613 return offset;
1614 }
1615
1616 /* Reads the MFW version and writes it as a param to the specified buffer.
1617 * Returns the dumped size in dwords.
1618 */
1619 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1620 struct qed_ptt *p_ptt,
1621 u32 *dump_buf, bool dump)
1622 {
1623 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1624
1625 if (dump) {
1626 u32 global_section_offsize, global_section_addr, mfw_ver;
1627 u32 public_data_addr, global_section_offsize_addr;
1628 int printed_chars;
1629
1630 /* Find MCP public data GRC address.
1631 * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
1632 */
1633 public_data_addr = qed_rd(p_hwfn, p_ptt,
1634 MISC_REG_SHARED_MEM_ADDR) |
1635 MCP_REG_SCRATCH;
1636
1637 /* Find MCP public global section offset */
1638 global_section_offsize_addr = public_data_addr +
1639 offsetof(struct mcp_public_data,
1640 sections) +
1641 sizeof(offsize_t) * PUBLIC_GLOBAL;
1642 global_section_offsize = qed_rd(p_hwfn, p_ptt,
1643 global_section_offsize_addr);
1644 global_section_addr = MCP_REG_SCRATCH +
1645 (global_section_offsize &
1646 OFFSIZE_OFFSET_MASK) * 4;
1647
1648 /* Read MFW version from MCP public global section */
1649 mfw_ver = qed_rd(p_hwfn, p_ptt,
1650 global_section_addr +
1651 offsetof(struct public_global, mfw_ver));
1652
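/* mfw_ver packs the four MFW version fields one per byte, most significant
 * byte first.
 */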
1653 /* Dump MFW version param */
1654 printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
1655 "%d_%d_%d_%d",
1656 (u8) (mfw_ver >> 24),
1657 (u8) (mfw_ver >> 16),
1658 (u8) (mfw_ver >> 8),
1659 (u8) mfw_ver);
1660 if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
1661 DP_NOTICE(p_hwfn,
1662 "Unexpected debug error: invalid MFW version string\n");
1663 }
1664
1665 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1666 }
1667
1668 /* Writes a section header to the specified buffer.
1669 * Returns the dumped size in dwords.
1670 */
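/* A section header is encoded exactly like a numeric param: the section name
 * is the param name and the number of params in the section is the value.
 */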
1671 static u32 qed_dump_section_hdr(u32 *dump_buf,
1672 bool dump, const char *name, u32 num_params)
1673 {
1674 return qed_dump_num_param(dump_buf, dump, name, num_params);
1675 }
1676
1677 /* Writes the common global params to the specified buffer.
1678 * Returns the dumped size in dwords.
1679 */
1680 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1681 struct qed_ptt *p_ptt,
1682 u32 *dump_buf,
1683 bool dump,
1684 u8 num_specific_global_params)
1685 {
1686 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1687 u32 offset = 0;
1688
1689 /* Find platform string and dump global params section header */
1690 offset += qed_dump_section_hdr(dump_buf + offset,
1691 dump,
1692 "global_params",
1693 NUM_COMMON_GLOBAL_PARAMS +
1694 num_specific_global_params);
1695
1696 /* Store params */
1697 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1698 offset += qed_dump_mfw_ver_param(p_hwfn,
1699 p_ptt, dump_buf + offset, dump);
1700 offset += qed_dump_num_param(dump_buf + offset,
1701 dump, "tools-version", TOOLS_VERSION);
1702 offset += qed_dump_str_param(dump_buf + offset,
1703 dump,
1704 "chip",
1705 s_chip_defs[dev_data->chip_id].name);
1706 offset += qed_dump_str_param(dump_buf + offset,
1707 dump,
1708 "platform",
1709 s_platform_defs[dev_data->platform_id].
1710 name);
1711 offset +=
1712 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
1713 p_hwfn->abs_pf_id);
1714 return offset;
1715 }
1716
1717 /* Writes the last section to the specified buffer at the given offset.
1718 * Returns the dumped size in dwords.
1719 */
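/* The trailer is an empty "last" section header followed by a single dword
 * holding the bitwise-inverted CRC32 of the entire dump up to and including
 * that header.
 */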
1720 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1721 {
1722 u32 start_offset = offset, crc = ~0;
1723
1724 /* Dump CRC section header */
1725 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1726
1727 /* Calculate CRC32 and store it in the dword following the "last" section.
1728 */
1729 if (dump)
1730 *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
1731 DWORDS_TO_BYTES(offset));
1732 offset++;
1733 return offset - start_offset;
1734 }
1735
1736 /* Update blocks reset state */
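/* A block is considered to be in reset when it has a reset bit and that bit
 * reads as 0 in the block's reset register.
 */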
1737 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1738 struct qed_ptt *p_ptt)
1739 {
1740 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1741 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
1742 u32 i;
1743
1744 /* Read reset registers */
1745 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
1746 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
1747 reg_val[i] = qed_rd(p_hwfn,
1748 p_ptt, s_reset_regs_defs[i].addr);
1749
1750 /* Check if blocks are in reset */
1751 for (i = 0; i < MAX_BLOCK_ID; i++)
1752 dev_data->block_in_reset[i] =
1753 s_block_defs[i]->has_reset_bit &&
1754 !(reg_val[s_block_defs[i]->reset_reg] &
1755 BIT(s_block_defs[i]->reset_bit_offset));
1756 }
1757
1758 /* Enable / disable the Debug block */
1759 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1760 struct qed_ptt *p_ptt, bool enable)
1761 {
1762 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1763 }
1764
1765 /* Resets the Debug block */
1766 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1767 struct qed_ptt *p_ptt)
1768 {
1769 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1770
1771 dbg_reset_reg_addr =
1772 s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
1773 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
1774 new_reset_reg_val = old_reset_reg_val &
1775 ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
1776
1777 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
1778 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
1779 }
1780
1781 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
1782 struct qed_ptt *p_ptt,
1783 enum dbg_bus_frame_modes mode)
1784 {
1785 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
1786 }
1787
1788 /* Enable / disable Debug Bus clients according to the specified mask.
1789 * (1 = enable, 0 = disable)
1790 */
1791 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1792 struct qed_ptt *p_ptt, u32 client_mask)
1793 {
1794 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1795 }
1796
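/* Recursively evaluates the modes tree at the given buffer offset. The tree
 * is a byte-serialized prefix expression: NOT consumes one sub-expression,
 * OR/AND consume two, and any other byte selects an entry in mode_enable[]
 * (offset by MAX_INIT_MODE_OPS). Returns true if the current modes match.
 */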
1797 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1798 {
1799 const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1800 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1801 u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
1802 bool arg1, arg2;
1803
1804 switch (tree_val) {
1805 case INIT_MODE_OP_NOT:
1806 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
1807 case INIT_MODE_OP_OR:
1808 case INIT_MODE_OP_AND:
1809 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1810 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1811 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1812 arg2) : (arg1 && arg2);
1813 default:
1814 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1815 }
1816 }
1817
1818 /* Returns the value of the specified GRC param */
1819 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1820 enum dbg_grc_params grc_param)
1821 {
1822 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1823
1824 return dev_data->grc.param_val[grc_param];
1825 }
1826
1827 /* Clear all GRC params */
1828 static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
1829 {
1830 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1831 u32 i;
1832
1833 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1834 dev_data->grc.param_set_by_user[i] = 0;
1835 }
1836
1837 /* Assign default GRC param values */
1838 static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
1839 {
1840 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1841 u32 i;
1842
1843 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1844 if (!dev_data->grc.param_set_by_user[i])
1845 dev_data->grc.param_val[i] =
1846 s_grc_param_defs[i].default_val[dev_data->chip_id];
1847 }
1848
1849 /* Returns true if the specified entity (indicated by GRC param) should be
1850 * included in the dump, false otherwise.
1851 */
1852 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1853 enum dbg_grc_params grc_param)
1854 {
1855 return qed_grc_get_param(p_hwfn, grc_param) > 0;
1856 }
1857
1858 /* Returns true if the specified Storm should be included in the dump, false
1859 * otherwise.
1860 */
1861 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1862 enum dbg_storms storm)
1863 {
1864 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1865 }
1866
1867 /* Returns true if the specified memory should be included in the dump, false
1868 * otherwise.
1869 */
1870 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1871 enum block_id block_id, u8 mem_group_id)
1872 {
1873 u8 i;
1874
1875 /* Check Storm match */
1876 if (s_block_defs[block_id]->associated_to_storm &&
1877 !qed_grc_is_storm_included(p_hwfn,
1878 (enum dbg_storms)s_block_defs[block_id]->storm_id))
1879 return false;
1880
1881 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
1882 if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
1883 mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
1884 return qed_grc_is_included(p_hwfn,
1885 s_big_ram_defs[i].grc_param);
1886 if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
1887 MEM_GROUP_PXP_MEM)
1888 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1889 if (mem_group_id == MEM_GROUP_RAM)
1890 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1891 if (mem_group_id == MEM_GROUP_PBUF)
1892 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1893 if (mem_group_id == MEM_GROUP_CAU_MEM ||
1894 mem_group_id == MEM_GROUP_CAU_SB ||
1895 mem_group_id == MEM_GROUP_CAU_PI)
1896 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
1897 if (mem_group_id == MEM_GROUP_QM_MEM)
1898 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
1899 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
1900 mem_group_id == MEM_GROUP_TASK_CFC_MEM)
1901 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
1902 if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
1903 MEM_GROUP_IGU_MSIX)
1904 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
1905 if (mem_group_id == MEM_GROUP_MULD_MEM)
1906 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
1907 if (mem_group_id == MEM_GROUP_PRS_MEM)
1908 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
1909 if (mem_group_id == MEM_GROUP_DMAE_MEM)
1910 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
1911 if (mem_group_id == MEM_GROUP_TM_MEM)
1912 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
1913 if (mem_group_id == MEM_GROUP_SDM_MEM)
1914 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
1915 if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
1916 MEM_GROUP_RDIF_CTX)
1917 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
1918 if (mem_group_id == MEM_GROUP_CM_MEM)
1919 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
1920 if (mem_group_id == MEM_GROUP_IOR)
1921 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1922
1923 return true;
1924 }
1925
1926 /* Stalls / unstalls all Storms */
1927 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1928 struct qed_ptt *p_ptt, bool stall)
1929 {
1930 u8 reg_val = stall ? 1 : 0;
1931 u8 storm_id;
1932
1933 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1934 if (qed_grc_is_storm_included(p_hwfn,
1935 (enum dbg_storms)storm_id)) {
1936 u32 reg_addr =
1937 s_storm_defs[storm_id].sem_fast_mem_addr +
1938 SEM_FAST_REG_STALL_0;
1939
1940 qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
1941 }
1942 }
1943
1944 msleep(STALL_DELAY_MS);
1945 }
1946
1947 /* Takes all blocks out of reset */
1948 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1949 struct qed_ptt *p_ptt)
1950 {
1951 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1952 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
1953 u32 i;
1954
1955 /* Fill reset regs values */
1956 for (i = 0; i < MAX_BLOCK_ID; i++)
1957 if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
1958 reg_val[s_block_defs[i]->reset_reg] |=
1959 BIT(s_block_defs[i]->reset_bit_offset);
1960
1961 /* Write reset registers */
1962 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
1963 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
1964 reg_val[i] |= s_reset_regs_defs[i].unreset_val;
1965 if (reg_val[i])
1966 qed_wr(p_hwfn,
1967 p_ptt,
1968 s_reset_regs_defs[i].addr +
1969 RESET_REG_UNRESET_OFFSET, reg_val[i]);
1970 }
1971 }
1972 }
1973
1974 /* Returns the attention name offsets of the specified block */
1975 static const struct dbg_attn_block_type_data *
1976 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
1977 {
1978 const struct dbg_attn_block *base_attn_block_arr =
1979 (const struct dbg_attn_block *)
1980 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1981
1982 return &base_attn_block_arr[block_id].per_type_data[attn_type];
1983 }
1984
1985 /* Returns the attention registers of the specified block */
1986 static const struct dbg_attn_reg *
1987 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
1988 u8 *num_attn_regs)
1989 {
1990 const struct dbg_attn_block_type_data *block_type_data =
1991 qed_get_block_attn_data(block_id, attn_type);
1992
1993 *num_attn_regs = block_type_data->num_regs;
1994 return &((const struct dbg_attn_reg *)
1995 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
1996 regs_offset];
1997 }
1998
1999 /* For each block, clear the status of all parities */
2000 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2001 struct qed_ptt *p_ptt)
2002 {
2003 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2004 u8 reg_idx, num_attn_regs;
2005 u32 block_id;
2006
2007 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2008 const struct dbg_attn_reg *attn_reg_arr;
2009
2010 if (dev_data->block_in_reset[block_id])
2011 continue;
2012
2013 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2014 ATTN_TYPE_PARITY,
2015 &num_attn_regs);
2016 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2017 const struct dbg_attn_reg *reg_data =
2018 &attn_reg_arr[reg_idx];
2019
2020 /* Check mode */
2021 bool eval_mode = GET_FIELD(reg_data->mode.data,
2022 DBG_MODE_HDR_EVAL_MODE) > 0;
2023 u16 modes_buf_offset =
2024 GET_FIELD(reg_data->mode.data,
2025 DBG_MODE_HDR_MODES_BUF_OFFSET);
2026
2027 if (!eval_mode ||
2028 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2029 /* Mode match - read parity status read-clear
2030 * register.
2031 */
2032 qed_rd(p_hwfn, p_ptt,
2033 DWORDS_TO_BYTES(reg_data->
2034 sts_clr_address));
2035 }
2036 }
2037 }
2038
2039 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2040 * The following parameters are dumped:
2041 * - 'count' = num_dumped_entries
2042 * - 'split' = split_type
2043 * - 'id' = split_id (dumped only if split_id >= 0)
2044 * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
2045 * param_val != NULL)
2046 */
2047 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2048 bool dump,
2049 u32 num_reg_entries,
2050 const char *split_type,
2051 int split_id,
2052 const char *param_name, const char *param_val)
2053 {
2054 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2055 u32 offset = 0;
2056
2057 offset += qed_dump_section_hdr(dump_buf + offset,
2058 dump, "grc_regs", num_params);
2059 offset += qed_dump_num_param(dump_buf + offset,
2060 dump, "count", num_reg_entries);
2061 offset += qed_dump_str_param(dump_buf + offset,
2062 dump, "split", split_type);
2063 if (split_id >= 0)
2064 offset += qed_dump_num_param(dump_buf + offset,
2065 dump, "id", split_id);
2066 if (param_name && param_val)
2067 offset += qed_dump_str_param(dump_buf + offset,
2068 dump, param_name, param_val);
2069 return offset;
2070 }
2071
2072 /* Dumps GRC register/memory. Returns the dumped size in dwords. */
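/* The entry starts with a single dword packing the dword-address of the
 * register together with its length (shifted by REG_DUMP_LEN_SHIFT), followed
 * by the register values themselves.
 */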
2073 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2074 struct qed_ptt *p_ptt, u32 *dump_buf,
2075 bool dump, u32 addr, u32 len)
2076 {
2077 u32 offset = 0, i;
2078
2079 if (dump) {
2080 *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
2081 for (i = 0; i < len; i++, addr++, offset++)
2082 *(dump_buf + offset) = qed_rd(p_hwfn,
2083 p_ptt,
2084 DWORDS_TO_BYTES(addr));
2085 } else {
2086 offset += len + 1;
2087 }
2088
2089 return offset;
2090 }
2091
2092 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2093 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2094 struct qed_ptt *p_ptt,
2095 struct dbg_array input_regs_arr,
2096 u32 *dump_buf,
2097 bool dump,
2098 bool block_enable[MAX_BLOCK_ID],
2099 u32 *num_dumped_reg_entries)
2100 {
2101 u32 i, offset = 0, input_offset = 0;
2102 bool mode_match = true;
2103
2104 *num_dumped_reg_entries = 0;
2105 while (input_offset < input_regs_arr.size_in_dwords) {
2106 const struct dbg_dump_cond_hdr *cond_hdr =
2107 (const struct dbg_dump_cond_hdr *)
2108 &input_regs_arr.ptr[input_offset++];
2109 bool eval_mode = GET_FIELD(cond_hdr->mode.data,
2110 DBG_MODE_HDR_EVAL_MODE) > 0;
2111
2112 /* Check mode/block */
2113 if (eval_mode) {
2114 u16 modes_buf_offset =
2115 GET_FIELD(cond_hdr->mode.data,
2116 DBG_MODE_HDR_MODES_BUF_OFFSET);
2117 mode_match = qed_is_mode_match(p_hwfn,
2118 &modes_buf_offset);
2119 }
2120
2121 if (mode_match && block_enable[cond_hdr->block_id]) {
2122 for (i = 0; i < cond_hdr->data_size;
2123 i++, input_offset++) {
2124 const struct dbg_dump_reg *reg =
2125 (const struct dbg_dump_reg *)
2126 &input_regs_arr.ptr[input_offset];
2127
2128 offset +=
2129 qed_grc_dump_reg_entry(p_hwfn, p_ptt,
2130 dump_buf + offset, dump,
2131 GET_FIELD(reg->data,
2132 DBG_DUMP_REG_ADDRESS),
2133 GET_FIELD(reg->data,
2134 DBG_DUMP_REG_LENGTH));
2135 (*num_dumped_reg_entries)++;
2136 }
2137 } else {
2138 input_offset += cond_hdr->data_size;
2139 }
2140 }
2141
2142 return offset;
2143 }
2144
2145 /* Dumps a GRC registers split (header + entries). Returns the dumped size in dwords. */
2146 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2147 struct qed_ptt *p_ptt,
2148 struct dbg_array input_regs_arr,
2149 u32 *dump_buf,
2150 bool dump,
2151 bool block_enable[MAX_BLOCK_ID],
2152 const char *split_type_name,
2153 u32 split_id,
2154 const char *param_name,
2155 const char *param_val)
2156 {
2157 u32 num_dumped_reg_entries, offset;
2158
2159 /* Calculate register dump header size (and skip it for now) */
2160 offset = qed_grc_dump_regs_hdr(dump_buf,
2161 false,
2162 0,
2163 split_type_name,
2164 split_id, param_name, param_val);
2165
2166 /* Dump registers */
2167 offset += qed_grc_dump_regs_entries(p_hwfn,
2168 p_ptt,
2169 input_regs_arr,
2170 dump_buf + offset,
2171 dump,
2172 block_enable,
2173 &num_dumped_reg_entries);
2174
2175 /* Write register dump header */
2176 if (dump && num_dumped_reg_entries > 0)
2177 qed_grc_dump_regs_hdr(dump_buf,
2178 dump,
2179 num_dumped_reg_entries,
2180 split_type_name,
2181 split_id, param_name, param_val);
2182
2183 return num_dumped_reg_entries > 0 ? offset : 0;
2184 }
2185
2186 /* Dumps registers according to the input registers array.
2187 * Returns the dumped size in dwords.
2188 */
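/* For port/PF split types, the function "pretends" to each port/PF in turn
 * before dumping that split, and pretends back to the original PF at the end.
 */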
2189 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2190 struct qed_ptt *p_ptt,
2191 u32 *dump_buf,
2192 bool dump,
2193 bool block_enable[MAX_BLOCK_ID],
2194 const char *param_name, const char *param_val)
2195 {
2196 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2197 u32 offset = 0, input_offset = 0;
2198 u8 port_id, pf_id;
2199
2200 if (dump)
2201 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
2202 while (input_offset <
2203 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2204 const struct dbg_dump_split_hdr *split_hdr =
2205 (const struct dbg_dump_split_hdr *)
2206 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2207 u8 split_type_id = GET_FIELD(split_hdr->hdr,
2208 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2209 u32 split_data_size = GET_FIELD(split_hdr->hdr,
2210 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2211 struct dbg_array curr_input_regs_arr = {
2212 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
2213 split_data_size};
2214
2215 switch (split_type_id) {
2216 case SPLIT_TYPE_NONE:
2217 case SPLIT_TYPE_VF:
2218 offset += qed_grc_dump_split_data(p_hwfn,
2219 p_ptt,
2220 curr_input_regs_arr,
2221 dump_buf + offset,
2222 dump,
2223 block_enable,
2224 "eng",
2225 (u32)(-1),
2226 param_name,
2227 param_val);
2228 break;
2229 case SPLIT_TYPE_PORT:
2230 for (port_id = 0;
2231 port_id <
2232 s_chip_defs[dev_data->chip_id].
2233 per_platform[dev_data->platform_id].num_ports;
2234 port_id++) {
2235 if (dump)
2236 qed_port_pretend(p_hwfn, p_ptt,
2237 port_id);
2238 offset +=
2239 qed_grc_dump_split_data(p_hwfn, p_ptt,
2240 curr_input_regs_arr,
2241 dump_buf + offset,
2242 dump, block_enable,
2243 "port", port_id,
2244 param_name,
2245 param_val);
2246 }
2247 break;
2248 case SPLIT_TYPE_PF:
2249 case SPLIT_TYPE_PORT_PF:
2250 for (pf_id = 0;
2251 pf_id <
2252 s_chip_defs[dev_data->chip_id].
2253 per_platform[dev_data->platform_id].num_pfs;
2254 pf_id++) {
2255 if (dump)
2256 qed_fid_pretend(p_hwfn, p_ptt, pf_id);
2257 offset += qed_grc_dump_split_data(p_hwfn,
2258 p_ptt,
2259 curr_input_regs_arr,
2260 dump_buf + offset,
2261 dump, block_enable,
2262 "pf", pf_id, param_name,
2263 param_val);
2264 }
2265 break;
2266 default:
2267 break;
2268 }
2269
2270 input_offset += split_data_size;
2271 }
2272
2273 /* Pretend to original PF */
2274 if (dump)
2275 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2276 return offset;
2277 }
2278
2279 /* Dump reset registers. Returns the dumped size in dwords. */
2280 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2281 struct qed_ptt *p_ptt,
2282 u32 *dump_buf, bool dump)
2283 {
2284 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2285 u32 i, offset = 0, num_regs = 0;
2286
2287 /* Calculate header size */
2288 offset += qed_grc_dump_regs_hdr(dump_buf,
2289 false, 0, "eng", -1, NULL, NULL);
2290
2291 /* Write reset registers */
2292 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2293 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2294 offset += qed_grc_dump_reg_entry(p_hwfn,
2295 p_ptt,
2296 dump_buf + offset,
2297 dump,
2298 BYTES_TO_DWORDS
2299 (s_reset_regs_defs
2300 [i].addr), 1);
2301 num_regs++;
2302 }
2303 }
2304
2305 /* Write header */
2306 if (dump)
2307 qed_grc_dump_regs_hdr(dump_buf,
2308 true, num_regs, "eng", -1, NULL, NULL);
2309 return offset;
2310 }
2311
2312 /* Dump registers that are modified during GRC Dump and therefore must be dumped
2313 * first. Returns the dumped size in dwords.
2314 */
2315 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2316 struct qed_ptt *p_ptt,
2317 u32 *dump_buf, bool dump)
2318 {
2319 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2320 u32 offset = 0, num_reg_entries = 0, block_id;
2321 u8 storm_id, reg_idx, num_attn_regs;
2322
2323 /* Calculate header size */
2324 offset += qed_grc_dump_regs_hdr(dump_buf,
2325 false, 0, "eng", -1, NULL, NULL);
2326
2327 /* Write parity registers */
2328 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2329 const struct dbg_attn_reg *attn_reg_arr;
2330
2331 if (dev_data->block_in_reset[block_id] && dump)
2332 continue;
2333
2334 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2335 ATTN_TYPE_PARITY,
2336 &num_attn_regs);
2337 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2338 const struct dbg_attn_reg *reg_data =
2339 &attn_reg_arr[reg_idx];
2340 u16 modes_buf_offset;
2341 bool eval_mode;
2342
2343 /* Check mode */
2344 eval_mode = GET_FIELD(reg_data->mode.data,
2345 DBG_MODE_HDR_EVAL_MODE) > 0;
2346 modes_buf_offset =
2347 GET_FIELD(reg_data->mode.data,
2348 DBG_MODE_HDR_MODES_BUF_OFFSET);
2349 if (!eval_mode ||
2350 qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
2351 /* Mode match - read and dump registers */
2352 offset += qed_grc_dump_reg_entry(p_hwfn,
2353 p_ptt,
2354 dump_buf + offset,
2355 dump,
2356 reg_data->mask_address,
2357 1);
2358 offset += qed_grc_dump_reg_entry(p_hwfn,
2359 p_ptt,
2360 dump_buf + offset,
2361 dump,
2362 GET_FIELD(reg_data->data,
2363 DBG_ATTN_REG_STS_ADDRESS),
2364 1);
2365 num_reg_entries += 2;
2366 }
2367 }
2368 }
2369
2370 /* Write storm stall status registers */
2371 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2372 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
2373 dump)
2374 continue;
2375
2376 offset += qed_grc_dump_reg_entry(p_hwfn,
2377 p_ptt,
2378 dump_buf + offset,
2379 dump,
2380 BYTES_TO_DWORDS(s_storm_defs[storm_id].
2381 sem_fast_mem_addr +
2382 SEM_FAST_REG_STALLED),
2383 1);
2384 num_reg_entries++;
2385 }
2386
2387 /* Write header */
2388 if (dump)
2389 qed_grc_dump_regs_hdr(dump_buf,
2390 true,
2391 num_reg_entries, "eng", -1, NULL, NULL);
2392 return offset;
2393 }
2394
2395 /* Dumps a GRC memory header (section and params).
2396 * The following parameters are dumped:
2397 * name - name is dumped only if it's not NULL.
2398 * addr - byte_addr is dumped only if name is NULL.
2399 * len - dword_len is always dumped.
2400 * width - bit_width is dumped if it's not zero.
2401 * packed - packed=1 is dumped if it's not false.
2402 * mem_group - mem_group is always dumped.
2403 * is_storm - true only if the memory is related to a Storm.
2404 * storm_letter - storm letter (valid only if is_storm is true).
2405 * Returns the dumped size in dwords.
2406 */
2407 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2408 u32 *dump_buf,
2409 bool dump,
2410 const char *name,
2411 u32 byte_addr,
2412 u32 dword_len,
2413 u32 bit_width,
2414 bool packed,
2415 const char *mem_group,
2416 bool is_storm, char storm_letter)
2417 {
2418 u8 num_params = 3;
2419 u32 offset = 0;
2420 char buf[64];
2421
2422 if (!dword_len)
2423 DP_NOTICE(p_hwfn,
2424 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2425 if (bit_width)
2426 num_params++;
2427 if (packed)
2428 num_params++;
2429
2430 /* Dump section header */
2431 offset += qed_dump_section_hdr(dump_buf + offset,
2432 dump, "grc_mem", num_params);
2433 if (name) {
2434 /* Dump name */
2435 if (is_storm) {
2436 strcpy(buf, "?STORM_");
2437 buf[0] = storm_letter;
2438 strcpy(buf + strlen(buf), name);
2439 } else {
2440 strcpy(buf, name);
2441 }
2442
2443 offset += qed_dump_str_param(dump_buf + offset,
2444 dump, "name", buf);
2445 if (dump)
2446 DP_VERBOSE(p_hwfn,
2447 QED_MSG_DEBUG,
2448 "Dumping %d registers from %s...\n",
2449 dword_len, buf);
2450 } else {
2451 /* Dump address */
2452 offset += qed_dump_num_param(dump_buf + offset,
2453 dump, "addr", byte_addr);
2454 if (dump && dword_len > 64)
2455 DP_VERBOSE(p_hwfn,
2456 QED_MSG_DEBUG,
2457 "Dumping %d registers from address 0x%x...\n",
2458 dword_len, byte_addr);
2459 }
2460
2461 /* Dump len */
2462 offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
2463
2464 /* Dump bit width */
2465 if (bit_width)
2466 offset += qed_dump_num_param(dump_buf + offset,
2467 dump, "width", bit_width);
2468
2469 /* Dump packed */
2470 if (packed)
2471 offset += qed_dump_num_param(dump_buf + offset,
2472 dump, "packed", 1);
2473
2474 /* Dump reg type */
2475 if (is_storm) {
2476 strcpy(buf, "?STORM_");
2477 buf[0] = storm_letter;
2478 strcpy(buf + strlen(buf), mem_group);
2479 } else {
2480 strcpy(buf, mem_group);
2481 }
2482
2483 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2484 return offset;
2485 }
2486
2487 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2488 * Returns the dumped size in dwords.
2489 */
2490 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2491 struct qed_ptt *p_ptt,
2492 u32 *dump_buf,
2493 bool dump,
2494 const char *name,
2495 u32 byte_addr,
2496 u32 dword_len,
2497 u32 bit_width,
2498 bool packed,
2499 const char *mem_group,
2500 bool is_storm, char storm_letter)
2501 {
2502 u32 offset = 0;
2503
2504 offset += qed_grc_dump_mem_hdr(p_hwfn,
2505 dump_buf + offset,
2506 dump,
2507 name,
2508 byte_addr,
2509 dword_len,
2510 bit_width,
2511 packed,
2512 mem_group, is_storm, storm_letter);
2513 if (dump) {
2514 u32 i;
2515
2516 for (i = 0; i < dword_len;
2517 i++, byte_addr += BYTES_IN_DWORD, offset++)
2518 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2519 } else {
2520 offset += dword_len;
2521 }
2522
2523 return offset;
2524 }
2525
2526 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
2527 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2528 struct qed_ptt *p_ptt,
2529 struct dbg_array input_mems_arr,
2530 u32 *dump_buf, bool dump)
2531 {
2532 u32 i, offset = 0, input_offset = 0;
2533 bool mode_match = true;
2534
2535 while (input_offset < input_mems_arr.size_in_dwords) {
2536 const struct dbg_dump_cond_hdr *cond_hdr;
2537 u32 num_entries;
2538 bool eval_mode;
2539
2540 cond_hdr = (const struct dbg_dump_cond_hdr *)
2541 &input_mems_arr.ptr[input_offset++];
2542 eval_mode = GET_FIELD(cond_hdr->mode.data,
2543 DBG_MODE_HDR_EVAL_MODE) > 0;
2544
2545 /* Check required mode */
2546 if (eval_mode) {
2547 u16 modes_buf_offset =
2548 GET_FIELD(cond_hdr->mode.data,
2549 DBG_MODE_HDR_MODES_BUF_OFFSET);
2550
2551 mode_match = qed_is_mode_match(p_hwfn,
2552 &modes_buf_offset);
2553 }
2554
2555 if (!mode_match) {
2556 input_offset += cond_hdr->data_size;
2557 continue;
2558 }
2559
2560 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2561 for (i = 0; i < num_entries;
2562 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2563 const struct dbg_dump_mem *mem =
2564 (const struct dbg_dump_mem *)
2565 &input_mems_arr.ptr[input_offset];
2566 u8 mem_group_id;
2567
2568 mem_group_id = GET_FIELD(mem->dword0,
2569 DBG_DUMP_MEM_MEM_GROUP_ID);
2570 if (mem_group_id >= MEM_GROUPS_NUM) {
2571 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2572 return 0;
2573 }
2574
2575 if (qed_grc_is_mem_included(p_hwfn,
2576 (enum block_id)cond_hdr->block_id,
2577 mem_group_id)) {
2578 u32 mem_byte_addr =
2579 DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
2580 DBG_DUMP_MEM_ADDRESS));
2581 u32 mem_len = GET_FIELD(mem->dword1,
2582 DBG_DUMP_MEM_LENGTH);
2583 char storm_letter = 'a';
2584 bool is_storm = false;
2585
2586 /* Update memory length for CCFC/TCFC memories
2587 * according to number of LCIDs/LTIDs.
2588 */
2589 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
2590 mem_len = qed_grc_get_param(p_hwfn,
2591 DBG_GRC_PARAM_NUM_LCIDS)
2592 * (mem_len / MAX_LCIDS);
2593 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
2594 mem_len = qed_grc_get_param(p_hwfn,
2595 DBG_GRC_PARAM_NUM_LTIDS)
2596 * (mem_len / MAX_LTIDS);
2597
2598 /* If memory is associated with Storm, update
2599 * Storm details.
2600 */
2601 if (s_block_defs[cond_hdr->block_id]->
2602 associated_to_storm) {
2603 is_storm = true;
2604 storm_letter =
2605 s_storm_defs[s_block_defs[
2606 cond_hdr->block_id]->
2607 storm_id].letter;
2608 }
2609
2610 /* Dump memory */
2611 offset += qed_grc_dump_mem(p_hwfn, p_ptt,
2612 dump_buf + offset, dump, NULL,
2613 mem_byte_addr, mem_len, 0,
2614 false,
2615 s_mem_group_names[mem_group_id],
2616 is_storm, storm_letter);
2617 }
2618 }
2619 }
2620
2621 return offset;
2622 }
2623
2624 /* Dumps GRC memories according to the input array dump_mem.
2625 * Returns the dumped size in dwords.
2626 */
2627 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2628 struct qed_ptt *p_ptt,
2629 u32 *dump_buf, bool dump)
2630 {
2631 u32 offset = 0, input_offset = 0;
2632
2633 while (input_offset <
2634 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
2635 const struct dbg_dump_split_hdr *split_hdr =
2636 (const struct dbg_dump_split_hdr *)
2637 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
2638 u8 split_type_id = GET_FIELD(split_hdr->hdr,
2639 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2640 u32 split_data_size = GET_FIELD(split_hdr->hdr,
2641 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2642 struct dbg_array curr_input_mems_arr = {
2643 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
2644 split_data_size};
2645
2646 switch (split_type_id) {
2647 case SPLIT_TYPE_NONE:
2648 offset += qed_grc_dump_mem_entries(p_hwfn,
2649 p_ptt,
2650 curr_input_mems_arr,
2651 dump_buf + offset,
2652 dump);
2653 break;
2654 default:
2655 DP_NOTICE(p_hwfn,
2656 "Dumping split memories is currently not supported\n");
2657 break;
2658 }
2659
2660 input_offset += split_data_size;
2661 }
2662
2663 return offset;
2664 }
2665
2666 /* Dumps GRC context data for the specified Storm.
2667 * Returns the dumped size in dwords.
2668 */
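/* The context is read LID by LID: (BIT(9) | lid) is written to the Storm's CM
 * context-write address, and the context dwords are then read back one at a
 * time from rd_reg_addr.
 */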
2669 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2670 struct qed_ptt *p_ptt,
2671 u32 *dump_buf,
2672 bool dump,
2673 const char *name,
2674 u32 num_lids,
2675 u32 lid_size,
2676 u32 rd_reg_addr,
2677 u8 storm_id)
2678 {
2679 u32 i, lid, total_size;
2680 u32 offset = 0;
2681
2682 if (!lid_size)
2683 return 0;
2684 lid_size *= BYTES_IN_DWORD;
2685 total_size = num_lids * lid_size;
2686 offset += qed_grc_dump_mem_hdr(p_hwfn,
2687 dump_buf + offset,
2688 dump,
2689 name,
2690 0,
2691 total_size,
2692 lid_size * 32,
2693 false,
2694 name,
2695 true, s_storm_defs[storm_id].letter);
2696
2697 /* Dump context data */
2698 if (dump) {
2699 for (lid = 0; lid < num_lids; lid++) {
2700 for (i = 0; i < lid_size; i++, offset++) {
2701 qed_wr(p_hwfn,
2702 p_ptt,
2703 s_storm_defs[storm_id].cm_ctx_wr_addr,
2704 BIT(9) | lid);
2705 *(dump_buf + offset) = qed_rd(p_hwfn,
2706 p_ptt,
2707 rd_reg_addr);
2708 }
2709 }
2710 } else {
2711 offset += total_size;
2712 }
2713
2714 return offset;
2715 }
2716
2717 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2718 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2719 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2720 {
2721 u32 offset = 0;
2722 u8 storm_id;
2723
2724 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2725 if (!qed_grc_is_storm_included(p_hwfn,
2726 (enum dbg_storms)storm_id))
2727 continue;
2728
2729 /* Dump Conn AG context */
2730 offset +=
2731 qed_grc_dump_ctx_data(p_hwfn,
2732 p_ptt,
2733 dump_buf + offset,
2734 dump,
2735 "CONN_AG_CTX",
2736 qed_grc_get_param(p_hwfn,
2737 DBG_GRC_PARAM_NUM_LCIDS),
2738 s_storm_defs[storm_id].
2739 cm_conn_ag_ctx_lid_size,
2740 s_storm_defs[storm_id].
2741 cm_conn_ag_ctx_rd_addr,
2742 storm_id);
2743
2744 /* Dump Conn ST context */
2745 offset +=
2746 qed_grc_dump_ctx_data(p_hwfn,
2747 p_ptt,
2748 dump_buf + offset,
2749 dump,
2750 "CONN_ST_CTX",
2751 qed_grc_get_param(p_hwfn,
2752 DBG_GRC_PARAM_NUM_LCIDS),
2753 s_storm_defs[storm_id].
2754 cm_conn_st_ctx_lid_size,
2755 s_storm_defs[storm_id].
2756 cm_conn_st_ctx_rd_addr,
2757 storm_id);
2758
2759 /* Dump Task AG context */
2760 offset +=
2761 qed_grc_dump_ctx_data(p_hwfn,
2762 p_ptt,
2763 dump_buf + offset,
2764 dump,
2765 "TASK_AG_CTX",
2766 qed_grc_get_param(p_hwfn,
2767 DBG_GRC_PARAM_NUM_LTIDS),
2768 s_storm_defs[storm_id].
2769 cm_task_ag_ctx_lid_size,
2770 s_storm_defs[storm_id].
2771 cm_task_ag_ctx_rd_addr,
2772 storm_id);
2773
2774 /* Dump Task ST context */
2775 offset +=
2776 qed_grc_dump_ctx_data(p_hwfn,
2777 p_ptt,
2778 dump_buf + offset,
2779 dump,
2780 "TASK_ST_CTX",
2781 qed_grc_get_param(p_hwfn,
2782 DBG_GRC_PARAM_NUM_LTIDS),
2783 s_storm_defs[storm_id].
2784 cm_task_st_ctx_lid_size,
2785 s_storm_defs[storm_id].
2786 cm_task_st_ctx_rd_addr,
2787 storm_id);
2788 }
2789
2790 return offset;
2791 }
2792
2793 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
2794 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
2795 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2796 {
2797 char buf[10] = "IOR_SET_?";
2798 u8 storm_id, set_id;
2799 u32 offset = 0;
2800
2801 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2802 if (qed_grc_is_storm_included(p_hwfn,
2803 (enum dbg_storms)storm_id)) {
2804 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
2805 u32 addr =
2806 s_storm_defs[storm_id].sem_fast_mem_addr +
2807 SEM_FAST_REG_STORM_REG_FILE +
2808 DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
2809
2810 buf[strlen(buf) - 1] = '0' + set_id;
2811 offset += qed_grc_dump_mem(p_hwfn,
2812 p_ptt,
2813 dump_buf + offset,
2814 dump,
2815 buf,
2816 addr,
2817 IORS_PER_SET,
2818 32,
2819 false,
2820 "ior",
2821 true,
2822 s_storm_defs
2823 [storm_id].letter);
2824 }
2825 }
2826 }
2827
2828 return offset;
2829 }
2830
2831 /* Dump VFC CAM. Returns the dumped size in dwords. */
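/* Each CAM row is read by writing the command (carrying the row) to
 * SEM_FAST_REG_VFC_DATA_WR, writing the CAM address (carrying the read
 * opcode) to SEM_FAST_REG_VFC_ADDR, and then reading the response dwords
 * from SEM_FAST_REG_VFC_DATA_RD.
 */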
2832 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2833 struct qed_ptt *p_ptt,
2834 u32 *dump_buf, bool dump, u8 storm_id)
2835 {
2836 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2837 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2838 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2839 u32 offset = 0;
2840 u32 row, i;
2841
2842 offset += qed_grc_dump_mem_hdr(p_hwfn,
2843 dump_buf + offset,
2844 dump,
2845 "vfc_cam",
2846 0,
2847 total_size,
2848 256,
2849 false,
2850 "vfc_cam",
2851 true, s_storm_defs[storm_id].letter);
2852 if (dump) {
2853 /* Prepare CAM address */
2854 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2855 for (row = 0; row < VFC_CAM_NUM_ROWS;
2856 row++, offset += VFC_CAM_RESP_DWORDS) {
2857 /* Write VFC CAM command */
2858 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2859 ARR_REG_WR(p_hwfn,
2860 p_ptt,
2861 s_storm_defs[storm_id].sem_fast_mem_addr +
2862 SEM_FAST_REG_VFC_DATA_WR,
2863 cam_cmd, VFC_CAM_CMD_DWORDS);
2864
2865 /* Write VFC CAM address */
2866 ARR_REG_WR(p_hwfn,
2867 p_ptt,
2868 s_storm_defs[storm_id].sem_fast_mem_addr +
2869 SEM_FAST_REG_VFC_ADDR,
2870 cam_addr, VFC_CAM_ADDR_DWORDS);
2871
2872 /* Read VFC CAM read response */
2873 ARR_REG_RD(p_hwfn,
2874 p_ptt,
2875 s_storm_defs[storm_id].sem_fast_mem_addr +
2876 SEM_FAST_REG_VFC_DATA_RD,
2877 dump_buf + offset, VFC_CAM_RESP_DWORDS);
2878 }
2879 } else {
2880 offset += total_size;
2881 }
2882
2883 return offset;
2884 }
2885
2886 /* Dump VFC RAM. Returns the dumped size in dwords. */
2887 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2888 struct qed_ptt *p_ptt,
2889 u32 *dump_buf,
2890 bool dump,
2891 u8 storm_id, struct vfc_ram_defs *ram_defs)
2892 {
2893 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2894 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2895 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2896 u32 offset = 0;
2897 u32 row, i;
2898
2899 offset += qed_grc_dump_mem_hdr(p_hwfn,
2900 dump_buf + offset,
2901 dump,
2902 ram_defs->mem_name,
2903 0,
2904 total_size,
2905 256,
2906 false,
2907 ram_defs->type_name,
2908 true, s_storm_defs[storm_id].letter);
2909
2910 /* Prepare RAM address */
2911 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2912
2913 if (!dump)
2914 return offset + total_size;
2915
2916 for (row = ram_defs->base_row;
2917 row < ram_defs->base_row + ram_defs->num_rows;
2918 row++, offset += VFC_RAM_RESP_DWORDS) {
2919 /* Write VFC RAM command */
2920 ARR_REG_WR(p_hwfn,
2921 p_ptt,
2922 s_storm_defs[storm_id].sem_fast_mem_addr +
2923 SEM_FAST_REG_VFC_DATA_WR,
2924 ram_cmd, VFC_RAM_CMD_DWORDS);
2925
2926 /* Write VFC RAM address */
2927 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2928 ARR_REG_WR(p_hwfn,
2929 p_ptt,
2930 s_storm_defs[storm_id].sem_fast_mem_addr +
2931 SEM_FAST_REG_VFC_ADDR,
2932 ram_addr, VFC_RAM_ADDR_DWORDS);
2933
2934 /* Read VFC RAM read response */
2935 ARR_REG_RD(p_hwfn,
2936 p_ptt,
2937 s_storm_defs[storm_id].sem_fast_mem_addr +
2938 SEM_FAST_REG_VFC_DATA_RD,
2939 dump_buf + offset, VFC_RAM_RESP_DWORDS);
2940 }
2941
2942 return offset;
2943 }
2944
2945 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
2946 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2947 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2948 {
2949 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2950 u8 storm_id, i;
2951 u32 offset = 0;
2952
2953 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2954 if (qed_grc_is_storm_included(p_hwfn,
2955 (enum dbg_storms)storm_id) &&
2956 s_storm_defs[storm_id].has_vfc &&
2957 (storm_id != DBG_PSTORM_ID ||
2958 dev_data->platform_id == PLATFORM_ASIC)) {
2959 /* Read CAM */
2960 offset += qed_grc_dump_vfc_cam(p_hwfn,
2961 p_ptt,
2962 dump_buf + offset,
2963 dump, storm_id);
2964
2965 /* Read RAM */
2966 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2967 offset += qed_grc_dump_vfc_ram(p_hwfn,
2968 p_ptt,
2969 dump_buf +
2970 offset,
2971 dump,
2972 storm_id,
2973 &s_vfc_ram_defs
2974 [i]);
2975 }
2976 }
2977
2978 return offset;
2979 }
2980
2981 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
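/* The RSS RAM is read indirectly: each address written to
 * RSS_REG_RSS_RAM_ADDR yields BYTES_IN_DWORD data dwords, read from
 * consecutive RSS_REG_RSS_RAM_DATA registers.
 */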
2982 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2983 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2984 {
2985 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2986 u32 offset = 0;
2987 u8 rss_mem_id;
2988
2989 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
2990 struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
2991 u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
2992 u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
2993 u32 total_size = (num_entries * entry_width) / 32;
2994 bool packed = (entry_width == 16);
2995 u32 addr = rss_defs->addr;
2996 u32 i, j;
2997
2998 offset += qed_grc_dump_mem_hdr(p_hwfn,
2999 dump_buf + offset,
3000 dump,
3001 rss_defs->mem_name,
3002 addr,
3003 total_size,
3004 entry_width,
3005 packed,
3006 rss_defs->type_name, false, 0);
3007
3008 if (!dump) {
3009 offset += total_size;
3010 continue;
3011 }
3012
3013 /* Dump RSS data */
3014 for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
3015 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
3016 for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
3017 *(dump_buf + offset) =
3018 qed_rd(p_hwfn, p_ptt,
3019 RSS_REG_RSS_RAM_DATA +
3020 DWORDS_TO_BYTES(j));
3021 }
3022 }
3023
3024 return offset;
3025 }
3026
3027 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
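/* Each value written to the Big RAM address register selects a pair of
 * blocks, so the read loop iterates total_blocks / 2 times and reads
 * 2 * BIG_RAM_BLOCK_SIZE_DWORDS dwords per iteration.
 */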
3028 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3029 struct qed_ptt *p_ptt,
3030 u32 *dump_buf, bool dump, u8 big_ram_id)
3031 {
3032 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3033 char mem_name[12] = "???_BIG_RAM";
3034 char type_name[8] = "???_RAM";
3035 u32 ram_size, total_blocks;
3036 u32 offset = 0, i, j;
3037
3038 total_blocks =
3039 s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
3040 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3041
3042 strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
3043 strlen(s_big_ram_defs[big_ram_id].instance_name));
3044 strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
3045 strlen(s_big_ram_defs[big_ram_id].instance_name));
3046
3047 /* Dump memory header */
3048 offset += qed_grc_dump_mem_hdr(p_hwfn,
3049 dump_buf + offset,
3050 dump,
3051 mem_name,
3052 0,
3053 ram_size,
3054 BIG_RAM_BLOCK_SIZE_BYTES * 8,
3055 false, type_name, false, 0);
3056
3057 if (!dump)
3058 return offset + ram_size;
3059
3060 /* Read and dump Big RAM data */
3061 for (i = 0; i < total_blocks / 2; i++) {
3062 qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
3063 i);
3064 for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
3065 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
3066 s_big_ram_defs[big_ram_id].
3067 data_reg_addr +
3068 DWORDS_TO_BYTES(j));
3069 }
3070
3071 return offset;
3072 }
3073
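/* Dumps the MCP scratchpad, cpu_reg_file and MCP block registers, plus the
 * shared memory address register. The MCP is halted for the duration of the
 * dump and resumed afterwards. Returns the dumped size in dwords.
 */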
3074 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3075 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3076 {
3077 bool block_enable[MAX_BLOCK_ID] = { 0 };
3078 bool halted = false;
3079 u32 offset = 0;
3080
3081 /* Halt MCP */
3082 if (dump) {
3083 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3084 if (!halted)
3085 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3086 }
3087
3088 /* Dump MCP scratchpad */
3089 offset += qed_grc_dump_mem(p_hwfn,
3090 p_ptt,
3091 dump_buf + offset,
3092 dump,
3093 NULL,
3094 MCP_REG_SCRATCH,
3095 MCP_REG_SCRATCH_SIZE,
3096 0, false, "MCP", false, 0);
3097
3098 /* Dump MCP cpu_reg_file */
3099 offset += qed_grc_dump_mem(p_hwfn,
3100 p_ptt,
3101 dump_buf + offset,
3102 dump,
3103 NULL,
3104 MCP_REG_CPU_REG_FILE,
3105 MCP_REG_CPU_REG_FILE_SIZE,
3106 0, false, "MCP", false, 0);
3107
3108 /* Dump MCP registers */
3109 block_enable[BLOCK_MCP] = true;
3110 offset += qed_grc_dump_registers(p_hwfn,
3111 p_ptt,
3112 dump_buf + offset,
3113 dump, block_enable, "block", "MCP");
3114
3115 /* Dump required non-MCP registers */
3116 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3117 dump, 1, "eng", -1, "block", "MCP");
3118 offset += qed_grc_dump_reg_entry(p_hwfn,
3119 p_ptt,
3120 dump_buf + offset,
3121 dump,
3122 BYTES_TO_DWORDS
3123 (MISC_REG_SHARED_MEM_ADDR), 1);
3124
3125 /* Release MCP */
3126 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3127 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3128 return offset;
3129 }
3130
3131 /* Dumps the tbus indirect memory for all PHYs. Returns the dumped size in dwords. */
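/* Each tbus address is split into a high and a low part: for every high value
 * written to the address-hi register, all 256 low offsets are written in turn
 * and one byte is read from each of the data-lo and data-hi registers.
 */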
3132 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3133 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3134 {
3135 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3136 char mem_name[32];
3137 u8 phy_id;
3138
3139 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3140 struct phy_defs *phy_defs = &s_phy_defs[phy_id];
3141 int printed_chars;
3142
3143 printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3144 phy_defs->phy_name);
3145 if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
3146 DP_NOTICE(p_hwfn,
3147 "Unexpected debug error: invalid PHY memory name\n");
3148 offset += qed_grc_dump_mem_hdr(p_hwfn,
3149 dump_buf + offset,
3150 dump,
3151 mem_name,
3152 0,
3153 PHY_DUMP_SIZE_DWORDS,
3154 16, true, mem_name, false, 0);
3155 if (dump) {
3156 u32 addr_lo_addr = phy_defs->base_addr +
3157 phy_defs->tbus_addr_lo_addr;
3158 u32 addr_hi_addr = phy_defs->base_addr +
3159 phy_defs->tbus_addr_hi_addr;
3160 u32 data_lo_addr = phy_defs->base_addr +
3161 phy_defs->tbus_data_lo_addr;
3162 u32 data_hi_addr = phy_defs->base_addr +
3163 phy_defs->tbus_data_hi_addr;
3164 u8 *bytes_buf = (u8 *)(dump_buf + offset);
3165
3166 for (tbus_hi_offset = 0;
3167 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3168 tbus_hi_offset++) {
3169 qed_wr(p_hwfn,
3170 p_ptt, addr_hi_addr, tbus_hi_offset);
3171 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3172 tbus_lo_offset++) {
3173 qed_wr(p_hwfn,
3174 p_ptt,
3175 addr_lo_addr, tbus_lo_offset);
3176 *(bytes_buf++) =
3177 (u8)qed_rd(p_hwfn, p_ptt,
3178 data_lo_addr);
3179 *(bytes_buf++) =
3180 (u8)qed_rd(p_hwfn, p_ptt,
3181 data_hi_addr);
3182 }
3183 }
3184 }
3185
3186 offset += PHY_DUMP_SIZE_DWORDS;
3187 }
3188
3189 return offset;
3190 }
3191
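/* Configures a single debug line of the specified block: selects the line and
 * programs its cycle-enable, right-shift, force-valid and force-frame
 * controls.
 */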
3192 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3193 struct qed_ptt *p_ptt,
3194 enum block_id block_id,
3195 u8 line_id,
3196 u8 cycle_en,
3197 u8 right_shift, u8 force_valid, u8 force_frame)
3198 {
3199 struct block_defs *p_block_defs = s_block_defs[block_id];
3200
3201 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
3202 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
3203 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
3204 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
3205 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
3206 }
3207
3208 /* Dumps Static Debug data. Returns the dumped size in dwords. */
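/* For every block that has a debug bus on the current chip, each debug line
 * is selected in turn and STATIC_DEBUG_LINE_DWORDS dwords are read from
 * DBG_REG_CALENDAR_OUT_DATA; blocks that are in reset are dumped as zeros.
 */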
3209 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3210 struct qed_ptt *p_ptt,
3211 u32 *dump_buf, bool dump)
3212 {
3213 u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
3214 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3215 u32 offset = 0, block_id, line_id, addr, i;
3216 struct block_defs *p_block_defs;
3217
3218 if (dump) {
3219 DP_VERBOSE(p_hwfn,
3220 QED_MSG_DEBUG, "Dumping static debug data...\n");
3221
3222 /* Disable all blocks debug output */
3223 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3224 p_block_defs = s_block_defs[block_id];
3225
3226 if (p_block_defs->has_dbg_bus[dev_data->chip_id])
3227 qed_wr(p_hwfn, p_ptt,
3228 p_block_defs->dbg_cycle_enable_addr, 0);
3229 }
3230
3231 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3232 qed_bus_set_framing_mode(p_hwfn,
3233 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3234 qed_wr(p_hwfn,
3235 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3236 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3237 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3238 }
3239
3240 /* Dump all static debug lines for each relevant block */
3241 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3242 p_block_defs = s_block_defs[block_id];
3243
3244 if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
3245 continue;
3246
3247 /* Dump static section params */
3248 offset += qed_grc_dump_mem_hdr(p_hwfn,
3249 dump_buf + offset,
3250 dump,
3251 p_block_defs->name, 0,
3252 block_dwords, 32, false,
3253 "STATIC", false, 0);
3254
3255 if (dump && !dev_data->block_in_reset[block_id]) {
3256 u8 dbg_client_id =
3257 p_block_defs->dbg_client_id[dev_data->chip_id];
3258
3259 /* Enable block's client */
3260 qed_bus_enable_clients(p_hwfn, p_ptt,
3261 BIT(dbg_client_id));
3262
3263 for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
3264 line_id++) {
3265 /* Configure debug line ID */
3266 qed_config_dbg_line(p_hwfn,
3267 p_ptt,
3268 (enum block_id)block_id,
3269 (u8)line_id,
3270 0xf, 0, 0, 0);
3271
3272 /* Read debug line info */
3273 for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
3274 i < STATIC_DEBUG_LINE_DWORDS;
3275 i++, offset++, addr += BYTES_IN_DWORD)
3276 dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
3277 addr);
3278 }
3279
3280 /* Disable block's client and debug output */
3281 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3282 qed_wr(p_hwfn, p_ptt,
3283 p_block_defs->dbg_cycle_enable_addr, 0);
3284 } else {
3285 /* All lines are invalid - dump zeros */
3286 if (dump)
3287 memset(dump_buf + offset, 0,
3288 DWORDS_TO_BYTES(block_dwords));
3289 offset += block_dwords;
3290 }
3291 }
3292
3293 if (dump) {
3294 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3295 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3296 }
3297
3298 return offset;
3299 }
3300
3301 /* Performs GRC Dump to the specified buffer.
3302 * Returns the dumped size in dwords.
3303 */
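/* The dump is built section by section: global params, reset registers,
 * registers modified by the dump itself, all GRC registers, memories, MCP,
 * contexts, RSS, Big RAMs, IORs, VFC, PHY tbus and static debug data,
 * terminated by the CRC trailer. Blocks are taken out of reset and parities
 * are masked via the MFW before dumping, Storms may be stalled while IOR/VFC
 * data is read, and everything is restored before returning.
 */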
3304 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3305 struct qed_ptt *p_ptt,
3306 u32 *dump_buf,
3307 bool dump, u32 *num_dumped_dwords)
3308 {
3309 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3310 bool parities_masked = false;
3311 u8 i, port_mode = 0;
3312 u32 offset = 0;
3313
3314 /* Check if emulation platform */
3315 *num_dumped_dwords = 0;
3316
3317 /* Fill GRC parameters that were not set by the user with their default
3318 * value.
3319 */
3320 qed_dbg_grc_set_params_default(p_hwfn);
3321
3322 /* Find port mode */
3323 if (dump) {
3324 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3325 case 0:
3326 port_mode = 1;
3327 break;
3328 case 1:
3329 port_mode = 2;
3330 break;
3331 case 2:
3332 port_mode = 4;
3333 break;
3334 }
3335 }
3336
3337 /* Update reset state */
3338 if (dump)
3339 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3340
3341 /* Dump global params */
3342 offset += qed_dump_common_global_params(p_hwfn,
3343 p_ptt,
3344 dump_buf + offset, dump, 4);
3345 offset += qed_dump_str_param(dump_buf + offset,
3346 dump, "dump-type", "grc-dump");
3347 offset += qed_dump_num_param(dump_buf + offset,
3348 dump,
3349 "num-lcids",
3350 qed_grc_get_param(p_hwfn,
3351 DBG_GRC_PARAM_NUM_LCIDS));
3352 offset += qed_dump_num_param(dump_buf + offset,
3353 dump,
3354 "num-ltids",
3355 qed_grc_get_param(p_hwfn,
3356 DBG_GRC_PARAM_NUM_LTIDS));
3357 offset += qed_dump_num_param(dump_buf + offset,
3358 dump, "num-ports", port_mode);
3359
3360 /* Dump reset registers (dumped before taking blocks out of reset) */
3361 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3362 offset += qed_grc_dump_reset_regs(p_hwfn,
3363 p_ptt,
3364 dump_buf + offset, dump);
3365
3366 /* Take all blocks out of reset (using reset registers) */
3367 if (dump) {
3368 qed_grc_unreset_blocks(p_hwfn, p_ptt);
3369 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3370 }
3371
3372 /* Disable all parities using MFW command */
3373 if (dump) {
3374 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3375 if (!parities_masked) {
3376 			if (qed_grc_get_param(p_hwfn,
3377 					      DBG_GRC_PARAM_PARITY_SAFE))
3378 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3379 else
3380 DP_NOTICE(p_hwfn,
3381 "Failed to mask parities using MFW\n");
3382 }
3383 }
3384
3385 /* Dump modified registers (dumped before modifying them) */
3386 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3387 offset += qed_grc_dump_modified_regs(p_hwfn,
3388 p_ptt,
3389 dump_buf + offset, dump);
3390
3391 /* Stall storms */
3392 if (dump &&
3393 (qed_grc_is_included(p_hwfn,
3394 DBG_GRC_PARAM_DUMP_IOR) ||
3395 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3396 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3397
3398 /* Dump all regs */
3399 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3400 /* Dump all blocks except MCP */
3401 bool block_enable[MAX_BLOCK_ID];
3402
3403 for (i = 0; i < MAX_BLOCK_ID; i++)
3404 block_enable[i] = true;
3405 block_enable[BLOCK_MCP] = false;
3406 offset += qed_grc_dump_registers(p_hwfn,
3407 p_ptt,
3408 dump_buf +
3409 offset,
3410 dump,
3411 block_enable, NULL, NULL);
3412 }
3413
3414 /* Dump memories */
3415 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3416
3417 /* Dump MCP */
3418 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3419 offset += qed_grc_dump_mcp(p_hwfn,
3420 p_ptt, dump_buf + offset, dump);
3421
3422 /* Dump context */
3423 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3424 offset += qed_grc_dump_ctx(p_hwfn,
3425 p_ptt, dump_buf + offset, dump);
3426
3427 /* Dump RSS memories */
3428 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3429 offset += qed_grc_dump_rss(p_hwfn,
3430 p_ptt, dump_buf + offset, dump);
3431
3432 /* Dump Big RAM */
3433 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3434 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3435 offset += qed_grc_dump_big_ram(p_hwfn,
3436 p_ptt,
3437 dump_buf + offset,
3438 dump, i);
3439
3440 /* Dump IORs */
3441 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
3442 offset += qed_grc_dump_iors(p_hwfn,
3443 p_ptt, dump_buf + offset, dump);
3444
3445 /* Dump VFC */
3446 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
3447 offset += qed_grc_dump_vfc(p_hwfn,
3448 p_ptt, dump_buf + offset, dump);
3449
3450 /* Dump PHY tbus */
3451 if (qed_grc_is_included(p_hwfn,
3452 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3453 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
3454 offset += qed_grc_dump_phy(p_hwfn,
3455 p_ptt, dump_buf + offset, dump);
3456
3457 /* Dump static debug data */
3458 if (qed_grc_is_included(p_hwfn,
3459 DBG_GRC_PARAM_DUMP_STATIC) &&
3460 dev_data->bus.state == DBG_BUS_STATE_IDLE)
3461 offset += qed_grc_dump_static_debug(p_hwfn,
3462 p_ptt,
3463 dump_buf + offset, dump);
3464
3465 /* Dump last section */
3466 offset += qed_dump_last_section(dump_buf, offset, dump);
3467 if (dump) {
3468 /* Unstall storms */
3469 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3470 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3471
3472 /* Clear parity status */
3473 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3474
3475 /* Enable all parities using MFW command */
3476 if (parities_masked)
3477 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3478 }
3479
3480 *num_dumped_dwords = offset;
3481
3482 return DBG_STATUS_OK;
3483 }
3484
3485 /* Writes the specified failing Idle Check rule to the specified buffer.
3486 * Returns the dumped size in dwords.
3487 */
3488 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3489 struct qed_ptt *p_ptt,
3490 				     u32 *dump_buf,
3492 bool dump,
3493 u16 rule_id,
3494 const struct dbg_idle_chk_rule *rule,
3495 u16 fail_entry_id, u32 *cond_reg_values)
3496 {
3497 const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
3498 s_dbg_arrays
3499 [BIN_BUF_DBG_IDLE_CHK_REGS].
3500 ptr)[rule->reg_offset];
3501 	const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
3502 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3503 struct dbg_idle_chk_result_hdr *hdr =
3504 (struct dbg_idle_chk_result_hdr *)dump_buf;
3505 const struct dbg_idle_chk_info_reg *info_regs =
3506 	    &regs[rule->num_cond_regs].info_reg;
3507 u32 next_reg_offset = 0, i, offset = 0;
3508 u8 reg_id;
3509
3510 /* Dump rule data */
3511 if (dump) {
3512 memset(hdr, 0, sizeof(*hdr));
3513 hdr->rule_id = rule_id;
3514 hdr->mem_entry_id = fail_entry_id;
3515 hdr->severity = rule->severity;
3516 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3517 }
3518
3519 offset += IDLE_CHK_RESULT_HDR_DWORDS;
3520
3521 /* Dump condition register values */
3522 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3523 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3524
3525 /* Write register header */
3526 if (dump) {
3527 struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3528 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
3529 + offset);
3530 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3531 memset(reg_hdr, 0,
3532 sizeof(struct dbg_idle_chk_result_reg_hdr));
3533 reg_hdr->start_entry = reg->start_entry;
3534 reg_hdr->size = reg->entry_size;
3535 SET_FIELD(reg_hdr->data,
3536 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3537 reg->num_entries > 1 || reg->start_entry > 0
3538 ? 1 : 0);
3539 SET_FIELD(reg_hdr->data,
3540 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3541
3542 /* Write register values */
3543 for (i = 0; i < reg_hdr->size;
3544 i++, next_reg_offset++, offset++)
3545 dump_buf[offset] =
3546 cond_reg_values[next_reg_offset];
3547 } else {
3548 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3549 reg->entry_size;
3550 }
3551 }
3552
3553 /* Dump info register values */
3554 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3555 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3556 u32 block_id;
3557
3558 if (!dump) {
3559 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3560 continue;
3561 }
3562
3563 /* Check if register's block is in reset */
3564 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3565 if (block_id >= MAX_BLOCK_ID) {
3566 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3567 return 0;
3568 }
3569
3570 if (!dev_data->block_in_reset[block_id]) {
3571 bool eval_mode = GET_FIELD(reg->mode.data,
3572 DBG_MODE_HDR_EVAL_MODE) > 0;
3573 bool mode_match = true;
3574
3575 /* Check mode */
3576 if (eval_mode) {
3577 u16 modes_buf_offset =
3578 GET_FIELD(reg->mode.data,
3579 DBG_MODE_HDR_MODES_BUF_OFFSET);
3580 mode_match =
3581 qed_is_mode_match(p_hwfn,
3582 &modes_buf_offset);
3583 }
3584
3585 if (mode_match) {
3586 u32 grc_addr =
3587 DWORDS_TO_BYTES(GET_FIELD(reg->data,
3588 DBG_IDLE_CHK_INFO_REG_ADDRESS));
3589
3590 /* Write register header */
3591 struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3592 (struct dbg_idle_chk_result_reg_hdr *)
3593 (dump_buf + offset);
3594
3595 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3596 hdr->num_dumped_info_regs++;
3597 memset(reg_hdr, 0, sizeof(*reg_hdr));
3598 reg_hdr->size = reg->size;
3599 SET_FIELD(reg_hdr->data,
3600 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3601 rule->num_cond_regs + reg_id);
3602
3603 /* Write register values */
3604 for (i = 0; i < reg->size;
3605 i++, offset++, grc_addr += 4)
3606 dump_buf[offset] =
3607 qed_rd(p_hwfn, p_ptt, grc_addr);
3608 }
3609 }
3610 }
3611
3612 return offset;
3613 }
3614
3615 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3616 static u32
3617 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3618 u32 *dump_buf, bool dump,
3619 const struct dbg_idle_chk_rule *input_rules,
3620 u32 num_input_rules, u32 *num_failing_rules)
3621 {
3622 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3623 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3624 u32 i, j, offset = 0;
3625 u16 entry_id;
3626 u8 reg_id;
3627
3628 *num_failing_rules = 0;
3629 for (i = 0; i < num_input_rules; i++) {
3630 const struct dbg_idle_chk_cond_reg *cond_regs;
3631 const struct dbg_idle_chk_rule *rule;
3632 const union dbg_idle_chk_reg *regs;
3633 u16 num_reg_entries = 1;
3634 bool check_rule = true;
3635 const u32 *imm_values;
3636
3637 rule = &input_rules[i];
3638 regs = &((const union dbg_idle_chk_reg *)
3639 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
3640 [rule->reg_offset];
3641 		cond_regs = &regs[0].cond_reg;
3642 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
3643 [rule->imm_offset];
3644
3645 /* Check if all condition register blocks are out of reset, and
3646 * find maximal number of entries (all condition registers that
3647 * are memories must have the same size, which is > 1).
3648 */
3649 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3650 reg_id++) {
3651 u32 block_id = GET_FIELD(cond_regs[reg_id].data,
3652 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3653
3654 if (block_id >= MAX_BLOCK_ID) {
3655 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3656 return 0;
3657 }
3658
3659 check_rule = !dev_data->block_in_reset[block_id];
3660 if (cond_regs[reg_id].num_entries > num_reg_entries)
3661 num_reg_entries = cond_regs[reg_id].num_entries;
3662 }
3663
3664 if (!check_rule && dump)
3665 continue;
3666
3667 /* Go over all register entries (number of entries is the same
3668 * for all condition registers).
3669 */
3670 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3671 /* Read current entry of all condition registers */
3672 if (dump) {
3673 u32 next_reg_offset = 0;
3674
3675 for (reg_id = 0;
3676 reg_id < rule->num_cond_regs;
3677 reg_id++) {
3678 const struct dbg_idle_chk_cond_reg
3679 *reg = &cond_regs[reg_id];
3680
3681 /* Find GRC address (if it's a memory,
3682 * the address of the specific entry is
3683 * calculated).
3684 */
3685 u32 grc_addr =
3686 DWORDS_TO_BYTES(
3687 GET_FIELD(reg->data,
3688 DBG_IDLE_CHK_COND_REG_ADDRESS));
3689
3690 if (reg->num_entries > 1 ||
3691 reg->start_entry > 0) {
3692 u32 padded_entry_size =
3693 reg->entry_size > 1 ?
3694 roundup_pow_of_two
3695 (reg->entry_size) : 1;
3696
3697 grc_addr +=
3698 DWORDS_TO_BYTES(
3699 (reg->start_entry +
3700 entry_id)
3701 * padded_entry_size);
3702 }
3703
3704 /* Read registers */
3705 if (next_reg_offset + reg->entry_size >=
3706 IDLE_CHK_MAX_ENTRIES_SIZE) {
3707 DP_NOTICE(p_hwfn,
3708 "idle check registers entry is too large\n");
3709 return 0;
3710 }
3711
3712 for (j = 0; j < reg->entry_size;
3713 j++, next_reg_offset++,
3714 grc_addr += 4)
3715 cond_reg_values[next_reg_offset] =
3716 qed_rd(p_hwfn, p_ptt, grc_addr);
3717 }
3718 }
3719
3720 /* Call rule's condition function - a return value of
3721 * true indicates failure.
3722 */
3723 if ((*cond_arr[rule->cond_id])(cond_reg_values,
3724 imm_values) || !dump) {
3725 offset +=
3726 qed_idle_chk_dump_failure(p_hwfn,
3727 p_ptt,
3728 dump_buf + offset,
3729 dump,
3730 rule->rule_id,
3731 rule,
3732 entry_id,
3733 cond_reg_values);
3734 (*num_failing_rules)++;
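				/* Report each rule at most once: skip the
				 * remaining entries of this rule.
				 */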
3735 break;
3736 }
3737 }
3738 }
3739
3740 return offset;
3741 }
3742
3743 /* Performs Idle Check Dump to the specified buffer.
3744 * Returns the dumped size in dwords.
3745 */
3746 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3747 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3748 {
3749 u32 offset = 0, input_offset = 0, num_failing_rules = 0;
3750 u32 num_failing_rules_offset;
3751
3752 /* Dump global params */
3753 offset += qed_dump_common_global_params(p_hwfn,
3754 p_ptt,
3755 dump_buf + offset, dump, 1);
3756 offset += qed_dump_str_param(dump_buf + offset,
3757 dump, "dump-type", "idle-chk");
3758
3759 /* Dump idle check section header with a single parameter */
3760 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
3761 num_failing_rules_offset = offset;
3762 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
3763 while (input_offset <
3764 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
3765 const struct dbg_idle_chk_cond_hdr *cond_hdr =
3766 (const struct dbg_idle_chk_cond_hdr *)
3767 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
3768 [input_offset++];
3769 bool eval_mode = GET_FIELD(cond_hdr->mode.data,
3770 DBG_MODE_HDR_EVAL_MODE) > 0;
3771 bool mode_match = true;
3772
3773 /* Check mode */
3774 if (eval_mode) {
3775 u16 modes_buf_offset =
3776 GET_FIELD(cond_hdr->mode.data,
3777 DBG_MODE_HDR_MODES_BUF_OFFSET);
3778
3779 mode_match = qed_is_mode_match(p_hwfn,
3780 &modes_buf_offset);
3781 }
3782
3783 if (mode_match) {
3784 u32 curr_failing_rules;
3785
3786 offset +=
3787 qed_idle_chk_dump_rule_entries(p_hwfn,
3788 p_ptt,
3789 dump_buf + offset,
3790 dump,
3791 (const struct dbg_idle_chk_rule *)
3792 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
3793 ptr[input_offset],
3794 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
3795 &curr_failing_rules);
3796 num_failing_rules += curr_failing_rules;
3797 }
3798
3799 input_offset += cond_hdr->data_size;
3800 }
3801
3802 /* Overwrite num_rules parameter */
3803 if (dump)
3804 qed_dump_num_param(dump_buf + num_failing_rules_offset,
3805 dump, "num_rules", num_failing_rules);
3806
3807 return offset;
3808 }
3809
3810 /* Finds the meta data image in NVRAM. */
3811 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3812 struct qed_ptt *p_ptt,
3813 u32 image_type,
3814 u32 *nvram_offset_bytes,
3815 u32 *nvram_size_bytes)
3816 {
3817 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
3818 struct mcp_file_att file_att;
3819
3820 /* Call NVRAM get file command */
3821 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
3822 image_type, &ret_mcp_resp, &ret_mcp_param,
3823 &ret_txn_size, (u32 *)&file_att) != 0)
3824 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3825
3826 /* Check response */
3827 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3828 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3829
3830 /* Update return values */
3831 *nvram_offset_bytes = file_att.nvm_start_addr;
3832 *nvram_size_bytes = file_att.len;
3833 DP_VERBOSE(p_hwfn,
3834 QED_MSG_DEBUG,
3835 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
3836 image_type, *nvram_offset_bytes, *nvram_size_bytes);
3837
3838 /* Check alignment */
3839 if (*nvram_size_bytes & 0x3)
3840 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
3841 return DBG_STATUS_OK;
3842 }
3843
3844 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3845 struct qed_ptt *p_ptt,
3846 u32 nvram_offset_bytes,
3847 u32 nvram_size_bytes, u32 *ret_buf)
3848 {
3849 u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
3850 u32 bytes_to_copy, read_offset = 0;
3851 s32 bytes_left = nvram_size_bytes;
3852
3853 DP_VERBOSE(p_hwfn,
3854 QED_MSG_DEBUG,
3855 "nvram_read: reading image of size %d bytes from NVRAM\n",
3856 nvram_size_bytes);
3857 do {
3858 bytes_to_copy =
3859 (bytes_left >
3860 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
3861
3862 /* Call NVRAM read command */
3863 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3864 DRV_MSG_CODE_NVM_READ_NVRAM,
3865 (nvram_offset_bytes +
3866 read_offset) |
3867 (bytes_to_copy <<
3868 DRV_MB_PARAM_NVM_LEN_SHIFT),
3869 &ret_mcp_resp, &ret_mcp_param,
3870 &ret_read_size,
3871 (u32 *)((u8 *)ret_buf +
3872 read_offset)) != 0)
3873 return DBG_STATUS_NVRAM_READ_FAILED;
3874
3875 /* Check response */
3876 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3877 return DBG_STATUS_NVRAM_READ_FAILED;
3878
3879 /* Update read offset */
3880 read_offset += ret_read_size;
3881 bytes_left -= ret_read_size;
3882 } while (bytes_left > 0);
3883
3884 return DBG_STATUS_OK;
3885 }
3886
3887 /* Get info on the MCP Trace data in the scratchpad:
3888 * - trace_data_grc_addr - the GRC address of the trace data
3889 * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
3890 * the header)
3891 */
3892 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
3893 struct qed_ptt *p_ptt,
3894 u32 *trace_data_grc_addr,
3895 u32 *trace_data_size_bytes)
3896 {
3897 /* Read MCP trace section offsize structure from MCP scratchpad */
3898 u32 spad_trace_offsize = qed_rd(p_hwfn,
3899 p_ptt,
3900 MCP_SPAD_TRACE_OFFSIZE_ADDR);
3901 u32 signature;
3902
3903 /* Extract MCP trace section GRC address from offsize structure (within
3904 * scratchpad).
3905 */
3906 *trace_data_grc_addr =
3907 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
3908
3909 /* Read signature from MCP trace section */
3910 signature = qed_rd(p_hwfn, p_ptt,
3911 *trace_data_grc_addr +
3912 offsetof(struct mcp_trace, signature));
3913 if (signature != MFW_TRACE_SIGNATURE)
3914 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
3915
3916 /* Read trace size from MCP trace section */
3917 *trace_data_size_bytes = qed_rd(p_hwfn,
3918 p_ptt,
3919 *trace_data_grc_addr +
3920 offsetof(struct mcp_trace, size));
3921 return DBG_STATUS_OK;
3922 }
3923
3924 /* Reads MCP trace meta data image from NVRAM.
3925 * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
3926 * file)
3927 * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
3928 * Trace meta data starts (invalid when loaded from file)
3929 * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
3930 */
3931 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
3932 struct qed_ptt *p_ptt,
3933 u32 trace_data_size_bytes,
3934 u32 *running_bundle_id,
3935 u32 *trace_meta_offset_bytes,
3936 u32 *trace_meta_size_bytes)
3937 {
3938 /* Read MCP trace section offsize structure from MCP scratchpad */
3939 u32 spad_trace_offsize = qed_rd(p_hwfn,
3940 p_ptt,
3941 MCP_SPAD_TRACE_OFFSIZE_ADDR);
3942
3943 /* Find running bundle ID */
3944 u32 running_mfw_addr =
3945 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
3946 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
3947 enum dbg_status status;
3948 u32 nvram_image_type;
3949
3950 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
3951 if (*running_bundle_id > 1)
3952 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
3953
3954 /* Find image in NVRAM */
3955 nvram_image_type =
3956 (*running_bundle_id ==
3957 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
3958 status = qed_find_nvram_image(p_hwfn,
3959 p_ptt,
3960 nvram_image_type,
3961 trace_meta_offset_bytes,
3962 trace_meta_size_bytes);
3963
3964 return status;
3965 }
3966
3967 /* Reads the MCP Trace data from the specified GRC address into the specified
3968 * buffer.
3969 */
3970 static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
3971 struct qed_ptt *p_ptt,
3972 u32 grc_addr, u32 size_in_dwords, u32 *buf)
3973 {
3974 u32 i;
3975
3976 DP_VERBOSE(p_hwfn,
3977 QED_MSG_DEBUG,
3978 "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
3979 size_in_dwords, grc_addr);
3980 for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
3981 buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
3982 }
3983
3984 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
3985 * buffer.
3986 */
3987 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
3988 struct qed_ptt *p_ptt,
3989 u32 nvram_offset_in_bytes,
3990 u32 size_in_bytes, u32 *buf)
3991 {
3992 u8 *byte_buf = (u8 *)buf;
3993 u8 modules_num, i;
3994 u32 signature;
3995
3996 /* Read meta data from NVRAM */
3997 enum dbg_status status = qed_nvram_read(p_hwfn,
3998 p_ptt,
3999 nvram_offset_in_bytes,
4000 size_in_bytes,
4001 buf);
4002
4003 if (status != DBG_STATUS_OK)
4004 return status;
4005
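	/* Meta image layout, as parsed below: a signature dword, a modules
	 * count byte, one length byte plus name bytes per module, and finally
	 * a second signature dword.
	 */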
4006 /* Extract and check first signature */
4007 signature = qed_read_unaligned_dword(byte_buf);
4008 byte_buf += sizeof(u32);
4009 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4010 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4011
4012 /* Extract number of modules */
4013 modules_num = *(byte_buf++);
4014
4015 /* Skip all modules */
4016 for (i = 0; i < modules_num; i++) {
4017 u8 module_len = *(byte_buf++);
4018
4019 byte_buf += module_len;
4020 }
4021
4022 /* Extract and check second signature */
4023 signature = qed_read_unaligned_dword(byte_buf);
4024 byte_buf += sizeof(u32);
4025 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4026 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4027 return DBG_STATUS_OK;
4028 }
4029
4030 /* Dump MCP Trace */
4031 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4032 struct qed_ptt *p_ptt,
4033 u32 *dump_buf,
4034 bool dump, u32 *num_dumped_dwords)
4035 {
4036 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4037 u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
4038 u32 trace_meta_offset_bytes, trace_meta_size_bytes;
4039 enum dbg_status status;
4040 int halted = 0;
4041
4042 *num_dumped_dwords = 0;
4043
4044 /* Get trace data info */
4045 status = qed_mcp_trace_get_data_info(p_hwfn,
4046 p_ptt,
4047 &trace_data_grc_addr,
4048 &trace_data_size_bytes);
4049 if (status != DBG_STATUS_OK)
4050 return status;
4051
4052 /* Dump global params */
4053 offset += qed_dump_common_global_params(p_hwfn,
4054 p_ptt,
4055 dump_buf + offset, dump, 1);
4056 offset += qed_dump_str_param(dump_buf + offset,
4057 dump, "dump-type", "mcp-trace");
4058
4059 /* Halt MCP while reading from scratchpad so the read data will be
4060 	 * consistent. If the halt fails, the MCP trace is taken anyway, with a
4061 	 * small risk that it may be corrupt.
4062 */
4063 if (dump) {
4064 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4065 if (!halted)
4066 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4067 }
4068
4069 /* Find trace data size */
4070 trace_data_size_dwords =
4071 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4072 BYTES_IN_DWORD);
4073
4074 /* Dump trace data section header and param */
4075 offset += qed_dump_section_hdr(dump_buf + offset,
4076 dump, "mcp_trace_data", 1);
4077 offset += qed_dump_num_param(dump_buf + offset,
4078 dump, "size", trace_data_size_dwords);
4079
4080 /* Read trace data from scratchpad into dump buffer */
4081 if (dump)
4082 qed_mcp_trace_read_data(p_hwfn,
4083 p_ptt,
4084 trace_data_grc_addr,
4085 trace_data_size_dwords,
4086 dump_buf + offset);
4087 offset += trace_data_size_dwords;
4088
4089 /* Resume MCP (only if halt succeeded) */
4090 if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
4091 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4092
4093 /* Dump trace meta section header */
4094 offset += qed_dump_section_hdr(dump_buf + offset,
4095 dump, "mcp_trace_meta", 1);
4096
4097 /* Read trace meta info */
4098 status = qed_mcp_trace_get_meta_info(p_hwfn,
4099 p_ptt,
4100 trace_data_size_bytes,
4101 &running_bundle_id,
4102 &trace_meta_offset_bytes,
4103 &trace_meta_size_bytes);
4104 if (status != DBG_STATUS_OK)
4105 return status;
4106
4107 /* Dump trace meta size param (trace_meta_size_bytes is always
4108 * dword-aligned).
4109 */
4110 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4111 offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4112 trace_meta_size_dwords);
4113
4114 /* Read trace meta image into dump buffer */
4115 if (dump) {
4116 status = qed_mcp_trace_read_meta(p_hwfn,
4117 p_ptt,
4118 trace_meta_offset_bytes,
4119 trace_meta_size_bytes,
4120 dump_buf + offset);
4121 if (status != DBG_STATUS_OK)
4122 return status;
4123 }
4124
4125 offset += trace_meta_size_dwords;
4126
4127 *num_dumped_dwords = offset;
4128
4129 return DBG_STATUS_OK;
4130 }
4131
4132 /* Dump GRC FIFO */
4133 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4134 struct qed_ptt *p_ptt,
4135 u32 *dump_buf,
4136 bool dump, u32 *num_dumped_dwords)
4137 {
4138 u32 offset = 0, dwords_read, size_param_offset;
4139 bool fifo_has_data;
4140
4141 *num_dumped_dwords = 0;
4142
4143 /* Dump global params */
4144 offset += qed_dump_common_global_params(p_hwfn,
4145 p_ptt,
4146 dump_buf + offset, dump, 1);
4147 offset += qed_dump_str_param(dump_buf + offset,
4148 dump, "dump-type", "reg-fifo");
4149
4150 /* Dump fifo data section header and param. The size param is 0 for now,
4151 * and is overwritten after reading the FIFO.
4152 */
4153 offset += qed_dump_section_hdr(dump_buf + offset,
4154 dump, "reg_fifo_data", 1);
4155 size_param_offset = offset;
4156 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4157
4158 if (!dump) {
4159 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4160 		 * test how much data is available, except by reading it.
4161 */
4162 offset += REG_FIFO_DEPTH_DWORDS;
4163 *num_dumped_dwords = offset;
4164 return DBG_STATUS_OK;
4165 }
4166
4167 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4168 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4169
4170 /* Pull available data from fifo. Use DMAE since this is widebus memory
4171 * and must be accessed atomically. Test for dwords_read not passing
4172 * buffer size since more entries could be added to the buffer as we are
4173 * emptying it.
4174 */
4175 for (dwords_read = 0;
4176 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4177 dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
4178 REG_FIFO_ELEMENT_DWORDS) {
4179 if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
4180 (u64)(uintptr_t)(&dump_buf[offset]),
4181 REG_FIFO_ELEMENT_DWORDS, 0))
4182 return DBG_STATUS_DMAE_FAILED;
4183 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4184 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4185 }
4186
4187 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4188 dwords_read);
4189
4190 *num_dumped_dwords = offset;
4191 return DBG_STATUS_OK;
4192 }
4193
4194 /* Dump IGU FIFO */
4195 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4196 struct qed_ptt *p_ptt,
4197 u32 *dump_buf,
4198 bool dump, u32 *num_dumped_dwords)
4199 {
4200 u32 offset = 0, dwords_read, size_param_offset;
4201 bool fifo_has_data;
4202
4203 *num_dumped_dwords = 0;
4204
4205 /* Dump global params */
4206 offset += qed_dump_common_global_params(p_hwfn,
4207 p_ptt,
4208 dump_buf + offset, dump, 1);
4209 offset += qed_dump_str_param(dump_buf + offset,
4210 dump, "dump-type", "igu-fifo");
4211
4212 /* Dump fifo data section header and param. The size param is 0 for now,
4213 * and is overwritten after reading the FIFO.
4214 */
4215 offset += qed_dump_section_hdr(dump_buf + offset,
4216 dump, "igu_fifo_data", 1);
4217 size_param_offset = offset;
4218 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4219
4220 if (!dump) {
4221 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4222 		 * test how much data is available, except by reading it.
4223 */
4224 offset += IGU_FIFO_DEPTH_DWORDS;
4225 *num_dumped_dwords = offset;
4226 return DBG_STATUS_OK;
4227 }
4228
4229 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4230 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4231
4232 /* Pull available data from fifo. Use DMAE since this is widebus memory
4233 * and must be accessed atomically. Test for dwords_read not passing
4234 * buffer size since more entries could be added to the buffer as we are
4235 * emptying it.
4236 */
4237 for (dwords_read = 0;
4238 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4239 dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
4240 IGU_FIFO_ELEMENT_DWORDS) {
4241 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4242 IGU_REG_ERROR_HANDLING_MEMORY,
4243 (u64)(uintptr_t)(&dump_buf[offset]),
4244 IGU_FIFO_ELEMENT_DWORDS, 0))
4245 return DBG_STATUS_DMAE_FAILED;
4246 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4247 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4248 }
4249
4250 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4251 dwords_read);
4252
4253 *num_dumped_dwords = offset;
4254 return DBG_STATUS_OK;
4255 }
4256
4257 /* Protection Override dump */
4258 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4259 struct qed_ptt *p_ptt,
4260 u32 *dump_buf,
4261 bool dump,
4262 u32 *num_dumped_dwords)
4263 {
4264 u32 offset = 0, size_param_offset, override_window_dwords;
4265
4266 *num_dumped_dwords = 0;
4267
4268 /* Dump global params */
4269 offset += qed_dump_common_global_params(p_hwfn,
4270 p_ptt,
4271 dump_buf + offset, dump, 1);
4272 offset += qed_dump_str_param(dump_buf + offset,
4273 dump, "dump-type", "protection-override");
4274
4275 /* Dump data section header and param. The size param is 0 for now, and
4276 * is overwritten after reading the data.
4277 */
4278 offset += qed_dump_section_hdr(dump_buf + offset,
4279 dump, "protection_override_data", 1);
4280 size_param_offset = offset;
4281 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4282
4283 if (!dump) {
4284 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4285 *num_dumped_dwords = offset;
4286 return DBG_STATUS_OK;
4287 }
4288
4289 /* Add override window info to buffer */
4290 override_window_dwords =
4291 qed_rd(p_hwfn, p_ptt,
4292 GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4293 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4294 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4295 GRC_REG_PROTECTION_OVERRIDE_WINDOW,
4296 (u64)(uintptr_t)(dump_buf + offset),
4297 override_window_dwords, 0))
4298 return DBG_STATUS_DMAE_FAILED;
4299 offset += override_window_dwords;
4300 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4301 override_window_dwords);
4302
4303 *num_dumped_dwords = offset;
4304 return DBG_STATUS_OK;
4305 }
4306
4307 /* Performs FW Asserts Dump to the specified buffer.
4308 * Returns the dumped size in dwords.
4309 */
4310 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4311 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4312 {
4313 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4314 char storm_letter_str[2] = "?";
4315 struct fw_info fw_info;
4316 u32 offset = 0, i;
4317 u8 storm_id;
4318
4319 /* Dump global params */
4320 offset += qed_dump_common_global_params(p_hwfn,
4321 p_ptt,
4322 dump_buf + offset, dump, 1);
4323 offset += qed_dump_str_param(dump_buf + offset,
4324 dump, "dump-type", "fw-asserts");
4325 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4326 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
4327 last_list_idx, element_addr;
4328
4329 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
4330 continue;
4331
4332 /* Read FW info for the current Storm */
4333 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4334
4335 /* Dump FW Asserts section header and params */
4336 storm_letter_str[0] = s_storm_defs[storm_id].letter;
4337 offset += qed_dump_section_hdr(dump_buf + offset, dump,
4338 "fw_asserts", 2);
4339 offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
4340 storm_letter_str);
4341 offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4342 fw_info.fw_asserts_section.
4343 list_element_dword_size);
4344
4345 if (!dump) {
4346 offset += fw_info.fw_asserts_section.
4347 list_element_dword_size;
4348 continue;
4349 }
4350
4351 /* Read and dump FW Asserts data */
4352 fw_asserts_section_addr =
4353 s_storm_defs[storm_id].sem_fast_mem_addr +
4354 SEM_FAST_REG_INT_RAM +
4355 RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
4356 section_ram_line_offset);
4357 next_list_idx_addr =
4358 fw_asserts_section_addr +
4359 DWORDS_TO_BYTES(fw_info.fw_asserts_section.
4360 list_next_index_dword_offset);
4361 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
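		/* The asserts list is cyclic: the most recent element sits
		 * just before next_list_idx, wrapping to the last element
		 * when next_list_idx is 0.
		 */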
4362 last_list_idx = (next_list_idx > 0
4363 ? next_list_idx
4364 : fw_info.fw_asserts_section.list_num_elements)
4365 - 1;
4366 element_addr =
4367 fw_asserts_section_addr +
4368 DWORDS_TO_BYTES(fw_info.fw_asserts_section.
4369 list_dword_offset) +
4370 last_list_idx *
4371 DWORDS_TO_BYTES(fw_info.fw_asserts_section.
4372 list_element_dword_size);
4373 for (i = 0;
4374 i < fw_info.fw_asserts_section.list_element_dword_size;
4375 i++, offset++, element_addr += BYTES_IN_DWORD)
4376 dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
4377 }
4378
4379 /* Dump last section */
4380 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
4381 return offset;
4382 }
4383
4384 /***************************** Public Functions *******************************/
4385
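/* The dump APIs below follow a two-phase pattern: query the required buffer
 * size, allocate, then dump (after qed_dbg_set_bin_ptr() has provided the
 * debug arrays). An illustrative sketch only - not code from this driver,
 * and consume_dump() is a hypothetical consumer:
 *
 *	u32 size_dw, dumped_dw;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_dw * sizeof(u32));
 *	if (!buf)
 *		return;
 *	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw) ==
 *	    DBG_STATUS_OK)
 *		consume_dump(buf, dumped_dw);
 *	vfree(buf);
 */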
4386 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4387 {
4388 /* Convert binary data to debug arrays */
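	/* Image layout assumed here: a dword holding the number of buffers,
	 * followed by an array of struct bin_buffer_hdr entries whose
	 * offset/length fields locate each debug array within bin_ptr.
	 */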
4389 u32 num_of_buffers = *(u32 *)bin_ptr;
4390 struct bin_buffer_hdr *buf_array;
4391 u8 buf_id;
4392
4393 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
4394
4395 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
4396 s_dbg_arrays[buf_id].ptr =
4397 (u32 *)(bin_ptr + buf_array[buf_id].offset);
4398 s_dbg_arrays[buf_id].size_in_dwords =
4399 BYTES_TO_DWORDS(buf_array[buf_id].length);
4400 }
4401
4402 return DBG_STATUS_OK;
4403 }
4404
4405 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4406 struct qed_ptt *p_ptt,
4407 u32 *buf_size)
4408 {
4409 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4410
4411 *buf_size = 0;
4412 if (status != DBG_STATUS_OK)
4413 return status;
4414 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4415 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4416 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4417 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4418 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4419 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4420 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4421 }
4422
4423 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4424 struct qed_ptt *p_ptt,
4425 u32 *dump_buf,
4426 u32 buf_size_in_dwords,
4427 u32 *num_dumped_dwords)
4428 {
4429 u32 needed_buf_size_in_dwords;
4430 enum dbg_status status;
4431
4432 status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
4433 &needed_buf_size_in_dwords);
4434
4435 *num_dumped_dwords = 0;
4436 if (status != DBG_STATUS_OK)
4437 return status;
4438 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4439 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4440
4441 /* GRC Dump */
4442 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4443
4444 /* Clear all GRC params */
4445 qed_dbg_grc_clear_params(p_hwfn);
4446 return status;
4447 }
4448
4449 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4450 struct qed_ptt *p_ptt,
4451 u32 *buf_size)
4452 {
4453 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4454 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4455
4456 *buf_size = 0;
4457 if (status != DBG_STATUS_OK)
4458 return status;
4459 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4460 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
4461 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
4462 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
4463 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4464 if (!dev_data->idle_chk.buf_size_set) {
4465 dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
4466 p_ptt,
4467 NULL, false);
4468 dev_data->idle_chk.buf_size_set = true;
4469 }
4470
4471 *buf_size = dev_data->idle_chk.buf_size;
4472 return DBG_STATUS_OK;
4473 }
4474
4475 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
4476 struct qed_ptt *p_ptt,
4477 u32 *dump_buf,
4478 u32 buf_size_in_dwords,
4479 u32 *num_dumped_dwords)
4480 {
4481 u32 needed_buf_size_in_dwords;
4482 enum dbg_status status;
4483
4484 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
4485 &needed_buf_size_in_dwords);
4486
4487 *num_dumped_dwords = 0;
4488 if (status != DBG_STATUS_OK)
4489 return status;
4490 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4491 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4492
4493 /* Update reset state */
4494 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4495
4496 /* Idle Check Dump */
4497 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
4498 return DBG_STATUS_OK;
4499 }
4500
4501 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4502 struct qed_ptt *p_ptt,
4503 u32 *buf_size)
4504 {
4505 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4506
4507 *buf_size = 0;
4508 if (status != DBG_STATUS_OK)
4509 return status;
4510 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4511 }
4512
4513 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4514 struct qed_ptt *p_ptt,
4515 u32 *dump_buf,
4516 u32 buf_size_in_dwords,
4517 u32 *num_dumped_dwords)
4518 {
4519 u32 needed_buf_size_in_dwords;
4520 enum dbg_status status;
4521
4522 status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
4523 &needed_buf_size_in_dwords);
4524
4525 if (status != DBG_STATUS_OK)
4526 return status;
4527 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4528 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4529
4530 /* Update reset state */
4531 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4532
4533 /* Perform dump */
4534 return qed_mcp_trace_dump(p_hwfn,
4535 p_ptt, dump_buf, true, num_dumped_dwords);
4536 }
4537
4538 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4539 struct qed_ptt *p_ptt,
4540 u32 *buf_size)
4541 {
4542 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4543
4544 *buf_size = 0;
4545 if (status != DBG_STATUS_OK)
4546 return status;
4547 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4548 }
4549
4550 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4551 struct qed_ptt *p_ptt,
4552 u32 *dump_buf,
4553 u32 buf_size_in_dwords,
4554 u32 *num_dumped_dwords)
4555 {
4556 u32 needed_buf_size_in_dwords;
4557 enum dbg_status status;
4558
4559 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4560 &needed_buf_size_in_dwords);
4561
4562 *num_dumped_dwords = 0;
4563 if (status != DBG_STATUS_OK)
4564 return status;
4565 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4566 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4567
4568 /* Update reset state */
4569 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4570 return qed_reg_fifo_dump(p_hwfn,
4571 p_ptt, dump_buf, true, num_dumped_dwords);
4572 }
4573
4574 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4575 struct qed_ptt *p_ptt,
4576 u32 *buf_size)
4577 {
4578 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4579
4580 *buf_size = 0;
4581 if (status != DBG_STATUS_OK)
4582 return status;
4583 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4584 }
4585
4586 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4587 struct qed_ptt *p_ptt,
4588 u32 *dump_buf,
4589 u32 buf_size_in_dwords,
4590 u32 *num_dumped_dwords)
4591 {
4592 u32 needed_buf_size_in_dwords;
4593 enum dbg_status status;
4594
4595 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4596 &needed_buf_size_in_dwords);
4597
4598 *num_dumped_dwords = 0;
4599 if (status != DBG_STATUS_OK)
4600 return status;
4601 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4602 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4603
4604 /* Update reset state */
4605 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4606 return qed_igu_fifo_dump(p_hwfn,
4607 p_ptt, dump_buf, true, num_dumped_dwords);
4608 }
4609
4610 enum dbg_status
4611 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4612 struct qed_ptt *p_ptt,
4613 u32 *buf_size)
4614 {
4615 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4616
4617 *buf_size = 0;
4618 if (status != DBG_STATUS_OK)
4619 return status;
4620 return qed_protection_override_dump(p_hwfn,
4621 p_ptt, NULL, false, buf_size);
4622 }
4623
4624 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
4625 struct qed_ptt *p_ptt,
4626 u32 *dump_buf,
4627 u32 buf_size_in_dwords,
4628 u32 *num_dumped_dwords)
4629 {
4630 u32 needed_buf_size_in_dwords;
4631 enum dbg_status status;
4632
4633 status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
4634 &needed_buf_size_in_dwords);
4635
4636 *num_dumped_dwords = 0;
4637 if (status != DBG_STATUS_OK)
4638 return status;
4639 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4640 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4641
4642 /* Update reset state */
4643 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4644 return qed_protection_override_dump(p_hwfn,
4645 p_ptt,
4646 dump_buf, true, num_dumped_dwords);
4647 }
4648
4649 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4650 struct qed_ptt *p_ptt,
4651 u32 *buf_size)
4652 {
4653 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4654
4655 *buf_size = 0;
4656 if (status != DBG_STATUS_OK)
4657 return status;
4658
4659 /* Update reset state */
4660 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4661 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
4662 return DBG_STATUS_OK;
4663 }
4664
4665 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4666 struct qed_ptt *p_ptt,
4667 u32 *dump_buf,
4668 u32 buf_size_in_dwords,
4669 u32 *num_dumped_dwords)
4670 {
4671 u32 needed_buf_size_in_dwords;
4672 enum dbg_status status;
4673
4674 status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
4675 &needed_buf_size_in_dwords);
4676
4677 *num_dumped_dwords = 0;
4678 if (status != DBG_STATUS_OK)
4679 return status;
4680 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4681 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4682
4683 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
4684 return DBG_STATUS_OK;
4685 }
4686
4687 /******************************* Data Types **********************************/
4688
4689 struct mcp_trace_format {
4690 u32 data;
4691 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
4692 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0
4693 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
4694 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
4695 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
4696 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
4697 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
4698 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
4699 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
4700 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
4701 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
4702 #define MCP_TRACE_FORMAT_LEN_SHIFT 24
4703 char *format_str;
4704 };
4705
4706 struct mcp_trace_meta {
4707 u32 modules_num;
4708 char **modules;
4709 u32 formats_num;
4710 struct mcp_trace_format *formats;
4711 };
4712
4713 /* Reg fifo element */
4714 struct reg_fifo_element {
4715 u64 data;
4716 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
4717 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
4718 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
4719 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
4720 #define REG_FIFO_ELEMENT_PF_SHIFT 24
4721 #define REG_FIFO_ELEMENT_PF_MASK 0xf
4722 #define REG_FIFO_ELEMENT_VF_SHIFT 28
4723 #define REG_FIFO_ELEMENT_VF_MASK 0xff
4724 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
4725 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
4726 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
4727 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
4728 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
4729 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
4730 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
4731 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
4732 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
4733 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
4734 };
4735
4736 /* IGU fifo element */
4737 struct igu_fifo_element {
4738 u32 dword0;
4739 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
4740 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
4741 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
4742 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
4743 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
4744 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
4745 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
4746 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
4747 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
4748 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
4749 u32 dword1;
4750 u32 dword2;
4751 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
4752 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
4753 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
4754 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
4755 u32 reserved;
4756 };
4757
4758 struct igu_fifo_wr_data {
4759 u32 data;
4760 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
4761 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
4762 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
4763 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
4764 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
4765 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
4766 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
4767 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
4768 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
4769 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
4770 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
4771 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
4772 };
4773
4774 struct igu_fifo_cleanup_wr_data {
4775 u32 data;
4776 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
4777 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
4778 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
4779 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
4780 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
4781 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
4782 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
4783 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
4784 };
4785
4786 /* Protection override element */
4787 struct protection_override_element {
4788 u64 data;
4789 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
4790 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
4791 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
4792 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
4793 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
4794 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
4795 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
4796 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
4797 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
4798 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
4799 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
4800 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
4801 };
4802
4803 enum igu_fifo_sources {
4804 IGU_SRC_PXP0,
4805 IGU_SRC_PXP1,
4806 IGU_SRC_PXP2,
4807 IGU_SRC_PXP3,
4808 IGU_SRC_PXP4,
4809 IGU_SRC_PXP5,
4810 IGU_SRC_PXP6,
4811 IGU_SRC_PXP7,
4812 IGU_SRC_CAU,
4813 IGU_SRC_ATTN,
4814 IGU_SRC_GRC
4815 };
4816
4817 enum igu_fifo_addr_types {
4818 IGU_ADDR_TYPE_MSIX_MEM,
4819 IGU_ADDR_TYPE_WRITE_PBA,
4820 IGU_ADDR_TYPE_WRITE_INT_ACK,
4821 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
4822 IGU_ADDR_TYPE_READ_INT,
4823 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
4824 IGU_ADDR_TYPE_RESERVED
4825 };
4826
4827 struct igu_fifo_addr_data {
4828 u16 start_addr;
4829 u16 end_addr;
4830 char *desc;
4831 char *vf_desc;
4832 enum igu_fifo_addr_types type;
4833 };
4834
4835 /******************************** Constants **********************************/
4836
4837 #define MAX_MSG_LEN 1024
4838 #define MCP_TRACE_MAX_MODULE_LEN 8
4839 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
4840 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
4841 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
4842 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
4843 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
4844 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
4845
4846 /********************************* Macros ************************************/
4847
4848 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
4849
4850 /***************************** Constant Arrays *******************************/
4851
4852 /* Status string array */
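/* Indexed by enum dbg_status; must be kept in sync with it. */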
4853 static const char * const s_status_str[] = {
4854 "Operation completed successfully",
4855 "Debug application version wasn't set",
4856 "Unsupported debug application version",
4857 "The debug block wasn't reset since the last recording",
4858 "Invalid arguments",
4859 "The debug output was already set",
4860 "Invalid PCI buffer size",
4861 "PCI buffer allocation failed",
4862 "A PCI buffer wasn't allocated",
4863 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
4864 "GRC/Timestamp input overlap in cycle dword 0",
4865 "Cannot record Storm data since the entire recording cycle is used by HW",
4866 "The Storm was already enabled",
4867 "The specified Storm wasn't enabled",
4868 "The block was already enabled",
4869 "The specified block wasn't enabled",
4870 "No input was enabled for recording",
4871 "Filters and triggers are not allowed when recording in 64b units",
4872 "The filter was already enabled",
4873 "The trigger was already enabled",
4874 "The trigger wasn't enabled",
4875 "A constraint can be added only after a filter was enabled or a trigger state was added",
4876 "Cannot add more than 3 trigger states",
4877 "Cannot add more than 4 constraints per filter or trigger state",
4878 "The recording wasn't started",
4879 "A trigger was configured, but it didn't trigger",
4880 "No data was recorded",
4881 "Dump buffer is too small",
4882 "Dumped data is not aligned to chunks",
4883 "Unknown chip",
4884 "Failed allocating virtual memory",
4885 "The input block is in reset",
4886 "Invalid MCP trace signature found in NVRAM",
4887 "Invalid bundle ID found in NVRAM",
4888 "Failed getting NVRAM image",
4889 "NVRAM image is not dword-aligned",
4890 "Failed reading from NVRAM",
4891 "Idle check parsing failed",
4892 "MCP Trace data is corrupt",
4893 "Dump doesn't contain meta data - it must be provided in an image file",
4894 "Failed to halt MCP",
4895 "Failed to resume MCP after halt",
4896 "DMAE transaction failed",
4897 "Failed to empty SEMI sync FIFO",
4898 "IGU FIFO data is corrupt",
4899 "MCP failed to mask parities",
4900 "FW Asserts parsing failed",
4901 "GRC FIFO data is corrupt",
4902 "Protection Override data is corrupt",
4903 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
4904 "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
4905 };
4906
4907 /* Idle check severity names array */
4908 static const char * const s_idle_chk_severity_str[] = {
4909 "Error",
4910 "Error if no traffic",
4911 "Warning"
4912 };
4913
4914 /* MCP Trace level names array */
4915 static const char * const s_mcp_trace_level_str[] = {
4916 "ERROR",
4917 "TRACE",
4918 "DEBUG"
4919 };
4920
4921 /* Parsing strings */
4922 static const char * const s_access_strs[] = {
4923 "read",
4924 "write"
4925 };
4926
4927 static const char * const s_privilege_strs[] = {
4928 "VF",
4929 "PDA",
4930 "HV",
4931 "UA"
4932 };
4933
4934 static const char * const s_protection_strs[] = {
4935 "(default)",
4936 "(default)",
4937 "(default)",
4938 "(default)",
4939 "override VF",
4940 "override PDA",
4941 "override HV",
4942 "override UA"
4943 };
4944
4945 static const char * const s_master_strs[] = {
4946 "???",
4947 "pxp",
4948 "mcp",
4949 "msdm",
4950 "psdm",
4951 "ysdm",
4952 "usdm",
4953 "tsdm",
4954 "xsdm",
4955 "dbu",
4956 "dmae",
4957 "???",
4958 "???",
4959 "???",
4960 "???",
4961 "???"
4962 };
4963
4964 static const char * const s_reg_fifo_error_strs[] = {
4965 "grc timeout",
4966 "address doesn't belong to any block",
4967 "reserved address in block or write to read-only address",
4968 "privilege/protection mismatch",
4969 "path isolation error"
4970 };
4971
4972 static const char * const s_igu_fifo_source_strs[] = {
4973 "TSTORM",
4974 "MSTORM",
4975 "USTORM",
4976 "XSTORM",
4977 "YSTORM",
4978 "PSTORM",
4979 "PCIE",
4980 "NIG_QM_PBF",
4981 "CAU",
4982 "ATTN",
4983 "GRC",
4984 };
4985
4986 static const char * const s_igu_fifo_error_strs[] = {
4987 "no error",
4988 "length error",
4989 "function disabled",
4990 	"VF sent command to attention address",
4991 "host sent prod update command",
4992 "read of during interrupt register while in MIMD mode",
4993 "access to PXP BAR reserved address",
4994 "producer update command to attention index",
4995 "unknown error",
4996 "SB index not valid",
4997 "SB relative index and FID not found",
4998 	"FID does not match",
4999 "command with error flag asserted (PCI error or CAU discard)",
5000 "VF sent cleanup and RF cleanup is disabled",
5001 "cleanup command on type bigger than 4"
5002 };
5003
5004 /* IGU FIFO address data */
5005 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5006 {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
5007 {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5008 {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
5009 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5010 IGU_ADDR_TYPE_WRITE_PBA},
5011 {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
5012 {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5013 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5014 IGU_ADDR_TYPE_WRITE_INT_ACK},
5015 {0x5f0, 0x5f0, "Attention bits update", NULL,
5016 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5017 {0x5f1, 0x5f1, "Attention bits set", NULL,
5018 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5019 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5020 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5021 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5022 IGU_ADDR_TYPE_READ_INT},
5023 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5024 IGU_ADDR_TYPE_READ_INT},
5025 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5026 IGU_ADDR_TYPE_READ_INT},
5027 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5028 IGU_ADDR_TYPE_READ_INT},
5029 {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5030 {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5031 };
5032
5033 /******************************** Variables **********************************/
5034
5035 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
5036 * (e.g. due to no NVRAM access).
5037 */
5038 static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
5039
5040 /* Temporary buffer, used for print size calculations */
5041 static char s_temp_buf[MAX_MSG_LEN];
5042
5043 /***************************** Public Functions *******************************/
5044
5045 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
5046 {
5047 /* Convert binary data to debug arrays */
5048 u32 num_of_buffers = *(u32 *)bin_ptr;
5049 struct bin_buffer_hdr *buf_array;
5050 u8 buf_id;
5051
5052 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
5053
5054 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
5055 s_dbg_arrays[buf_id].ptr =
5056 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5057 s_dbg_arrays[buf_id].size_in_dwords =
5058 BYTES_TO_DWORDS(buf_array[buf_id].length);
5059 }
5060
5061 return DBG_STATUS_OK;
5062 }
5063
5064 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5065 {
5066 return (a + b) % size;
5067 }
5068
5069 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5070 {
5071 return (size + a - b) % size;
5072 }
5073
5074 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5075 * bytes) and returns them as a dword value. The specified buffer offset is
5076 * updated.
5077 */
5078 static u32 qed_read_from_cyclic_buf(void *buf,
5079 u32 *offset,
5080 u32 buf_size, u8 num_bytes_to_read)
5081 {
5082 u8 *bytes_buf = (u8 *)buf;
5083 u8 *val_ptr;
5084 u32 val = 0;
5085 u8 i;
5086
5087 val_ptr = (u8 *)&val;
5088
5089 for (i = 0; i < num_bytes_to_read; i++) {
5090 val_ptr[i] = bytes_buf[*offset];
5091 *offset = qed_cyclic_add(*offset, 1, buf_size);
5092 }
5093
5094 return val;
5095 }
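/* Illustrative sketch (not part of the driver flow): reading 4 bytes from a
 * 16-byte cyclic buffer starting at offset 14 wraps around to offsets 0 and
 * 1. On a little-endian host the bytes land in the returned dword LSB-first,
 * and the caller's offset is left at 2. The function name is hypothetical.
 */
static void __maybe_unused qed_example_cyclic_read(void)
{
	u8 buf[16] = { 0 };
	u32 offset = 14;
	u32 val;

	buf[14] = 0x11;
	buf[15] = 0x22;
	buf[0] = 0x33;
	buf[1] = 0x44;

	/* val == 0x44332211 and offset == 2 after the call (little endian) */
	val = qed_read_from_cyclic_buf(buf, &offset, sizeof(buf), 4);
	(void)val;
}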
5096
5097 /* Reads and returns the next byte from the specified buffer.
5098 * The specified buffer offset is updated.
5099 */
5100 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5101 {
5102 return ((u8 *)buf)[(*offset)++];
5103 }
5104
5105 /* Reads and returns the next dword from the specified buffer.
5106 * The specified buffer offset is updated.
5107 */
5108 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5109 {
5110 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5111
5112 *offset += 4;
5113 return dword_val;
5114 }
5115
5116 /* Reads the next string from the specified buffer, and copies it to the
5117 * specified pointer. The specified buffer offset is updated.
5118 */
5119 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5120 {
5121 const char *source_str = &((const char *)buf)[*offset];
5122
5123 strncpy(dest, source_str, size);
5124 dest[size - 1] = '\0';
5125 *offset += size;
5126 }
5127
5128 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5129 * If the specified buffer is NULL, a temporary buffer pointer is returned.
5130 */
5131 static char *qed_get_buf_ptr(void *buf, u32 offset)
5132 {
5133 return buf ? (char *)buf + offset : s_temp_buf;
5134 }
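/* Illustrative sketch of the sizing trick used throughout the parsers below:
 * when the caller passes a NULL results buffer, every sprintf() goes through
 * qed_get_buf_ptr() into s_temp_buf, so only the returned lengths accumulate.
 * This is how the *_results_buf_size() helpers measure the formatted output
 * without writing it anywhere. The helper name is hypothetical.
 */
static u32 __maybe_unused qed_example_sizing_pass(char *results_buf)
{
	u32 offset = 0;

	/* With results_buf == NULL these calls only measure the text */
	offset += sprintf(qed_get_buf_ptr(results_buf, offset), "line 1\n");
	offset += sprintf(qed_get_buf_ptr(results_buf, offset), "line 2\n");

	/* Required size in bytes, excluding the NULL terminator */
	return offset;
}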
5135
5136 /* Reads a param from the specified buffer. Returns the number of dwords read.
5137 * If the returned str_param is NULL, the param is numeric and its value is
5138 * returned in num_param.
5139 * Otherwise, the param is a string and its pointer is returned in str_param.
5140 */
5141 static u32 qed_read_param(u32 *dump_buf,
5142 const char **param_name,
5143 const char **param_str_val, u32 *param_num_val)
5144 {
5145 char *char_buf = (char *)dump_buf;
5146 u32 offset = 0; /* In bytes */
5147
5148 /* Extract param name */
5149 *param_name = char_buf;
5150 offset += strlen(*param_name) + 1;
5151
5152 /* Check param type */
5153 if (*(char_buf + offset++)) {
5154 /* String param */
5155 *param_str_val = char_buf + offset;
5156 offset += strlen(*param_str_val) + 1;
5157 if (offset & 0x3)
5158 offset += (4 - (offset & 0x3));
5159 } else {
5160 /* Numeric param */
5161 *param_str_val = NULL;
5162 if (offset & 0x3)
5163 offset += (4 - (offset & 0x3));
5164 *param_num_val = *(u32 *)(char_buf + offset);
5165 offset += 4;
5166 }
5167
5168 return offset / 4;
5169 }
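/* Illustrative sketch (hypothetical buffer contents): a numeric param is
 * encoded as a NULL-terminated name, a type byte (0 = numeric), padding up
 * to the next dword boundary, and the 32-bit value. On a little-endian host
 * the three dwords below decode as name "size" with value 0x40, and
 * qed_read_param() returns 3.
 */
static void __maybe_unused qed_example_read_numeric_param(void)
{
	/* Bytes: 's' 'i' 'z' 'e' '\0' <type 0> <pad> <pad> <value dword> */
	u32 buf[3] = { 0x657a6973, 0x00000000, 0x40 };
	const char *name, *str_val;
	u32 num_val = 0, num_dwords;

	num_dwords = qed_read_param(buf, &name, &str_val, &num_val);

	/* Here: num_dwords == 3, str_val == NULL, num_val == 0x40 */
	(void)num_dwords;
}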
5170
5171 /* Reads a section header from the specified buffer.
5172 * Returns the number of dwords read.
5173 */
5174 static u32 qed_read_section_hdr(u32 *dump_buf,
5175 const char **section_name,
5176 u32 *num_section_params)
5177 {
5178 const char *param_str_val;
5179
5180 return qed_read_param(dump_buf,
5181 section_name, &param_str_val, num_section_params);
5182 }
5183
5184 /* Reads section params from the specified buffer and prints them to the results
5185 * buffer. Returns the number of dwords read.
5186 */
5187 static u32 qed_print_section_params(u32 *dump_buf,
5188 u32 num_section_params,
5189 char *results_buf, u32 *num_chars_printed)
5190 {
5191 u32 i, dump_offset = 0, results_offset = 0;
5192
5193 for (i = 0; i < num_section_params; i++) {
5194 const char *param_name;
5195 const char *param_str_val;
5196 u32 param_num_val = 0;
5197
5198 dump_offset += qed_read_param(dump_buf + dump_offset,
5199 &param_name,
5200 &param_str_val, &param_num_val);
5201 if (param_str_val)
5202 /* String param */
5203 results_offset +=
5204 sprintf(qed_get_buf_ptr(results_buf,
5205 results_offset),
5206 "%s: %s\n", param_name, param_str_val);
5207 else if (strcmp(param_name, "fw-timestamp"))
5208 /* Numeric param */
5209 results_offset +=
5210 sprintf(qed_get_buf_ptr(results_buf,
5211 results_offset),
5212 "%s: %d\n", param_name, param_num_val);
5213 }
5214
5215 results_offset +=
5216 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5217 *num_chars_printed = results_offset;
5218 return dump_offset;
5219 }
5220
5221 const char *qed_dbg_get_status_str(enum dbg_status status)
5222 {
5223 return (status <
5224 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
5225 }
5226
5227 /* Parses the idle check rules and returns the number of characters printed.
5228 * In case of parsing error, returns 0.
5229 */
5230 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
5231 u32 *dump_buf,
5232 u32 *dump_buf_end,
5233 u32 num_rules,
5234 bool print_fw_idle_chk,
5235 char *results_buf,
5236 u32 *num_errors, u32 *num_warnings)
5237 {
5238 u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
5239 u16 i, j;
5240
5241 *num_errors = 0;
5242 *num_warnings = 0;
5243
5244 /* Go over dumped results */
5245 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
5246 rule_idx++) {
5247 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
5248 struct dbg_idle_chk_result_hdr *hdr;
5249 const char *parsing_str;
5250 u32 parsing_str_offset;
5251 const char *lsi_msg;
5252 u8 curr_reg_id = 0;
5253 bool has_fw_msg;
5254
5255 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
5256 rule_parsing_data =
5257 (const struct dbg_idle_chk_rule_parsing_data *)
5258 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
5259 ptr[hdr->rule_id];
5260 parsing_str_offset =
5261 GET_FIELD(rule_parsing_data->data,
5262 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
5263 has_fw_msg =
5264 GET_FIELD(rule_parsing_data->data,
5265 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
5266 parsing_str = &((const char *)
5267 s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
5268 [parsing_str_offset];
5269 lsi_msg = parsing_str;
5270
5271 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
5272 return 0;
5273
5274 /* Skip rule header */
5275 dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
5276
5277 /* Update errors/warnings count */
5278 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
5279 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
5280 (*num_errors)++;
5281 else
5282 (*num_warnings)++;
5283
5284 /* Print rule severity */
5285 results_offset +=
5286 sprintf(qed_get_buf_ptr(results_buf,
5287 results_offset), "%s: ",
5288 s_idle_chk_severity_str[hdr->severity]);
5289
5290 /* Print rule message */
5291 if (has_fw_msg)
5292 parsing_str += strlen(parsing_str) + 1;
5293 results_offset +=
5294 sprintf(qed_get_buf_ptr(results_buf,
5295 results_offset), "%s.",
5296 has_fw_msg &&
5297 print_fw_idle_chk ? parsing_str : lsi_msg);
5298 parsing_str += strlen(parsing_str) + 1;
5299
5300 /* Print register values */
5301 results_offset +=
5302 sprintf(qed_get_buf_ptr(results_buf,
5303 results_offset), " Registers:");
5304 for (i = 0;
5305 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
5306 i++) {
5307 struct dbg_idle_chk_result_reg_hdr *reg_hdr
5308 = (struct dbg_idle_chk_result_reg_hdr *)
5309 dump_buf;
5310 bool is_mem =
5311 GET_FIELD(reg_hdr->data,
5312 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
5313 u8 reg_id =
5314 GET_FIELD(reg_hdr->data,
5315 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
5316
5317 /* Skip reg header */
5318 dump_buf +=
5319 (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
5320
5321 /* Skip register names until the required reg_id is
5322 * reached.
5323 */
5324 for (; reg_id > curr_reg_id;
5325 curr_reg_id++,
5326 parsing_str += strlen(parsing_str) + 1);
5327
5328 results_offset +=
5329 sprintf(qed_get_buf_ptr(results_buf,
5330 results_offset), " %s",
5331 parsing_str);
5332 if (i < hdr->num_dumped_cond_regs && is_mem)
5333 results_offset +=
5334 sprintf(qed_get_buf_ptr(results_buf,
5335 results_offset),
5336 "[%d]", hdr->mem_entry_id +
5337 reg_hdr->start_entry);
5338 results_offset +=
5339 sprintf(qed_get_buf_ptr(results_buf,
5340 results_offset), "=");
5341 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
5342 results_offset +=
5343 sprintf(qed_get_buf_ptr(results_buf,
5344 results_offset),
5345 "0x%x", *dump_buf);
5346 if (j < reg_hdr->size - 1)
5347 results_offset +=
5348 sprintf(qed_get_buf_ptr
5349 (results_buf,
5350 results_offset), ",");
5351 }
5352 }
5353
5354 results_offset +=
5355 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5356 }
5357
5358 /* Check if end of dump buffer was exceeded */
5359 if (dump_buf > dump_buf_end)
5360 return 0;
5361 return results_offset;
5362 }
5363
5364 /* Parses an idle check dump buffer.
5365 * If result_buf is not NULL, the idle check results are printed to it.
5366 * In any case, the required results buffer size is assigned to
5367 * parsed_results_bytes.
5368 * The parsing status is returned.
5369 */
5370 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
5371 u32 *dump_buf,
5372 u32 num_dumped_dwords,
5373 char *results_buf,
5374 u32 *parsed_results_bytes,
5375 u32 *num_errors,
5376 u32 *num_warnings)
5377 {
5378 const char *section_name, *param_name, *param_str_val;
5379 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
5380 u32 num_section_params = 0, num_rules;
5381 u32 results_offset = 0; /* Offset in results_buf in bytes */
5382
5383 *parsed_results_bytes = 0;
5384 *num_errors = 0;
5385 *num_warnings = 0;
5386 if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
5387 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
5388 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5389
5390 /* Read global_params section */
5391 dump_buf += qed_read_section_hdr(dump_buf,
5392 &section_name, &num_section_params);
5393 if (strcmp(section_name, "global_params"))
5394 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5395
5396 /* Print global params */
5397 dump_buf += qed_print_section_params(dump_buf,
5398 num_section_params,
5399 results_buf, &results_offset);
5400
5401 /* Read idle_chk section */
5402 dump_buf += qed_read_section_hdr(dump_buf,
5403 &section_name, &num_section_params);
5404 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
5405 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5406
5407 dump_buf += qed_read_param(dump_buf,
5408 &param_name, &param_str_val, &num_rules);
5409 if (strcmp(param_name, "num_rules") != 0)
5410 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5411
5412 if (num_rules) {
5413 u32 rules_print_size;
5414
5415 /* Print FW output */
5416 results_offset +=
5417 sprintf(qed_get_buf_ptr(results_buf,
5418 results_offset),
5419 "FW_IDLE_CHECK:\n");
5420 rules_print_size =
5421 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5422 dump_buf_end, num_rules,
5423 true,
5424 results_buf ?
5425 results_buf +
5426 results_offset : NULL,
5427 num_errors, num_warnings);
5428 results_offset += rules_print_size;
5429 if (rules_print_size == 0)
5430 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5431
5432 /* Print LSI output */
5433 results_offset +=
5434 sprintf(qed_get_buf_ptr(results_buf,
5435 results_offset),
5436 "\nLSI_IDLE_CHECK:\n");
5437 rules_print_size =
5438 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5439 dump_buf_end, num_rules,
5440 false,
5441 results_buf ?
5442 results_buf +
5443 results_offset : NULL,
5444 num_errors, num_warnings);
5445 results_offset += rules_print_size;
5446 if (rules_print_size == 0)
5447 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5448 }
5449
5450 /* Print errors/warnings count */
5451 if (*num_errors) {
5452 results_offset +=
5453 sprintf(qed_get_buf_ptr(results_buf,
5454 results_offset),
5455 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
5456 *num_errors, *num_warnings);
5457 } else if (*num_warnings) {
5458 results_offset +=
5459 sprintf(qed_get_buf_ptr(results_buf,
5460 results_offset),
5461 "\nIdle Check completed successfully (with %d warnings)\n",
5462 *num_warnings);
5463 } else {
5464 results_offset +=
5465 sprintf(qed_get_buf_ptr(results_buf,
5466 results_offset),
5467 "\nIdle Check completed successfully\n");
5468 }
5469
5470 /* Add 1 for string NULL termination */
5471 *parsed_results_bytes = results_offset + 1;
5472 return DBG_STATUS_OK;
5473 }
5474
5475 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
5476 u32 *dump_buf,
5477 u32 num_dumped_dwords,
5478 u32 *results_buf_size)
5479 {
5480 u32 num_errors, num_warnings;
5481
5482 return qed_parse_idle_chk_dump(p_hwfn,
5483 dump_buf,
5484 num_dumped_dwords,
5485 NULL,
5486 results_buf_size,
5487 &num_errors, &num_warnings);
5488 }
5489
5490 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
5491 u32 *dump_buf,
5492 u32 num_dumped_dwords,
5493 char *results_buf,
5494 u32 *num_errors, u32 *num_warnings)
5495 {
5496 u32 parsed_buf_size;
5497
5498 return qed_parse_idle_chk_dump(p_hwfn,
5499 dump_buf,
5500 num_dumped_dwords,
5501 results_buf,
5502 &parsed_buf_size,
5503 num_errors, num_warnings);
5504 }
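/* Illustrative sketch of the intended two-pass usage of the parser API above:
 * first query the required text size with a NULL results buffer, then
 * allocate and print into it. The helper name is hypothetical and error
 * handling is trimmed for brevity.
 */
static void __maybe_unused
qed_example_format_idle_chk(struct qed_hwfn *p_hwfn,
			    u32 *dump_buf, u32 num_dumped_dwords)
{
	u32 buf_size = 0, num_errors, num_warnings;
	char *results_buf;

	if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
					      num_dumped_dwords,
					      &buf_size) != DBG_STATUS_OK)
		return;

	results_buf = vzalloc(buf_size);
	if (!results_buf)
		return;

	qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
				   results_buf, &num_errors, &num_warnings);

	vfree(results_buf);
}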
5505
5506 /* Frees the specified MCP Trace meta data */
5507 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
5508 struct mcp_trace_meta *meta)
5509 {
5510 u32 i;
5511
5512 /* Release modules */
5513 if (meta->modules) {
5514 for (i = 0; i < meta->modules_num; i++)
5515 kfree(meta->modules[i]);
5516 kfree(meta->modules);
5517 }
5518
5519 /* Release formats */
5520 if (meta->formats) {
5521 for (i = 0; i < meta->formats_num; i++)
5522 kfree(meta->formats[i].format_str);
5523 kfree(meta->formats);
5524 }
5525 }
5526
5527 /* Allocates and fills MCP Trace meta data based on the specified meta data
5528 * dump buffer.
5529 * Returns debug status code.
5530 */
5531 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
5532 const u32 *meta_buf,
5533 struct mcp_trace_meta *meta)
5534 {
5535 u8 *meta_buf_bytes = (u8 *)meta_buf;
5536 u32 offset = 0, signature, i;
5537
5538 memset(meta, 0, sizeof(*meta));
5539
5540 /* Read first signature */
5541 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5542 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5543 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5544
5545 /* Read number of modules and allocate memory for all the modules
5546 * pointers.
5547 */
5548 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5549 meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
5550 if (!meta->modules)
5551 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5552
5553 /* Allocate and read all module strings */
5554 for (i = 0; i < meta->modules_num; i++) {
5555 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5556
5557 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
5558 if (!(*(meta->modules + i))) {
5559 /* Update number of modules to be released */
5560 meta->modules_num = i ? i - 1 : 0;
5561 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5562 }
5563
5564 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
5565 *(meta->modules + i));
5566 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
5567 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
5568 }
5569
5570 /* Read second signature */
5571 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5572 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5573 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5574
5575 /* Read number of formats and allocate memory for all formats */
5576 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5577 meta->formats = kzalloc(meta->formats_num *
5578 sizeof(struct mcp_trace_format),
5579 GFP_KERNEL);
5580 if (!meta->formats)
5581 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5582
5583 /* Allocate and read all strings */
5584 for (i = 0; i < meta->formats_num; i++) {
5585 struct mcp_trace_format *format_ptr = &meta->formats[i];
5586 u8 format_len;
5587
5588 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
5589 &offset);
5590 format_len =
5591 (format_ptr->data &
5592 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
5593 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
5594 if (!format_ptr->format_str) {
5595 /* Update number of formats to be released */
5596 meta->formats_num = i ? i - 1 : 0;
5597 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5598 }
5599
5600 qed_read_str_from_buf(meta_buf_bytes,
5601 &offset,
5602 format_len, format_ptr->format_str);
5603 }
5604
5605 return DBG_STATUS_OK;
5606 }
5607
5608 /* Parses an MCP Trace dump buffer.
5609 * If result_buf is not NULL, the MCP Trace results are printed to it.
5610 * In any case, the required results buffer size is assigned to
5611 * parsed_results_bytes.
5612 * The parsing status is returned.
5613 */
5614 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5615 u32 *dump_buf,
5616 u32 num_dumped_dwords,
5617 char *results_buf,
5618 u32 *parsed_results_bytes)
5619 {
5620 u32 results_offset = 0, param_mask, param_shift, param_num_val;
5621 u32 num_section_params, offset, end_offset, bytes_left;
5622 const char *section_name, *param_name, *param_str_val;
5623 u32 trace_data_dwords, trace_meta_dwords;
5624 struct mcp_trace_meta meta;
5625 struct mcp_trace *trace;
5626 enum dbg_status status;
5627 const u32 *meta_buf;
5628 u8 *trace_buf;
5629
5630 *parsed_results_bytes = 0;
5631
5632 /* Read global_params section */
5633 dump_buf += qed_read_section_hdr(dump_buf,
5634 &section_name, &num_section_params);
5635 if (strcmp(section_name, "global_params"))
5636 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5637
5638 /* Print global params */
5639 dump_buf += qed_print_section_params(dump_buf,
5640 num_section_params,
5641 results_buf, &results_offset);
5642
5643 /* Read trace_data section */
5644 dump_buf += qed_read_section_hdr(dump_buf,
5645 &section_name, &num_section_params);
5646 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
5647 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5648 dump_buf += qed_read_param(dump_buf,
5649 &param_name, &param_str_val, &param_num_val);
5650 if (strcmp(param_name, "size"))
5651 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5652 trace_data_dwords = param_num_val;
5653
5654 /* Prepare trace info */
5655 trace = (struct mcp_trace *)dump_buf;
5656 trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
5657 offset = trace->trace_oldest;
5658 end_offset = trace->trace_prod;
5659 bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
5660 dump_buf += trace_data_dwords;
5661
5662 /* Read meta_data section */
5663 dump_buf += qed_read_section_hdr(dump_buf,
5664 &section_name, &num_section_params);
5665 if (strcmp(section_name, "mcp_trace_meta"))
5666 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5667 dump_buf += qed_read_param(dump_buf,
5668 &param_name, &param_str_val, &param_num_val);
5669 if (strcmp(param_name, "size") != 0)
5670 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5671 trace_meta_dwords = param_num_val;
5672
5673 /* Choose meta data buffer */
5674 if (!trace_meta_dwords) {
5675 /* Dump doesn't include meta data */
5676 if (!s_mcp_trace_meta.ptr)
5677 return DBG_STATUS_MCP_TRACE_NO_META;
5678 meta_buf = s_mcp_trace_meta.ptr;
5679 } else {
5680 /* Dump includes meta data */
5681 meta_buf = dump_buf;
5682 }
5683
5684 /* Allocate meta data memory */
5685 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
5686 if (status != DBG_STATUS_OK)
5687 goto free_mem;
5688
5689 /* Ignore the level and modules masks - just print everything that is
5690 * already in the buffer.
5691 */
5692 while (bytes_left) {
5693 struct mcp_trace_format *format_ptr;
5694 u8 format_level, format_module;
5695 u32 params[3] = { 0, 0, 0 };
5696 u32 header, format_idx, i;
5697
5698 if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
5699 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
5700 goto free_mem;
5701 }
5702
5703 header = qed_read_from_cyclic_buf(trace_buf,
5704 &offset,
5705 trace->size,
5706 MFW_TRACE_ENTRY_SIZE);
5707 bytes_left -= MFW_TRACE_ENTRY_SIZE;
5708 format_idx = header & MFW_TRACE_EVENTID_MASK;
5709
5710 /* Skip message if its index doesn't exist in the meta data */
5711 if (format_idx > meta.formats_num) {
5712 u8 format_size =
5713 (u8)((header &
5714 MFW_TRACE_PRM_SIZE_MASK) >>
5715 MFW_TRACE_PRM_SIZE_SHIFT);
5716
5717 if (bytes_left < format_size) {
5718 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
5719 goto free_mem;
5720 }
5721
5722 offset = qed_cyclic_add(offset,
5723 format_size, trace->size);
5724 bytes_left -= format_size;
5725 continue;
5726 }
5727
5728 format_ptr = &meta.formats[format_idx];
5729 for (i = 0,
5730 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
5731 MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
5732 i < MCP_TRACE_FORMAT_MAX_PARAMS;
5733 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
5734 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
5735 /* Extract param size (0..3) */
5736 u8 param_size =
5737 (u8)((format_ptr->data &
5738 param_mask) >> param_shift);
5739
5740 /* If the param size is zero, there are no other
5741 * parameters.
5742 */
5743 if (!param_size)
5744 break;
5745
5746 /* Size is encoded using 2 bits, where 3 is used to
5747 * encode 4.
5748 */
5749 if (param_size == 3)
5750 param_size = 4;
5751 if (bytes_left < param_size) {
5752 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
5753 goto free_mem;
5754 }
5755
5756 params[i] = qed_read_from_cyclic_buf(trace_buf,
5757 &offset,
5758 trace->size,
5759 param_size);
5760 bytes_left -= param_size;
5761 }
5762
5763 format_level =
5764 (u8)((format_ptr->data &
5765 MCP_TRACE_FORMAT_LEVEL_MASK) >>
5766 MCP_TRACE_FORMAT_LEVEL_SHIFT);
5767 format_module =
5768 (u8)((format_ptr->data &
5769 MCP_TRACE_FORMAT_MODULE_MASK) >>
5770 MCP_TRACE_FORMAT_MODULE_SHIFT);
5771 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
5772 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
5773 goto free_mem;
5774 }
5775
5776 /* Print current message to results buffer */
5777 results_offset +=
5778 sprintf(qed_get_buf_ptr(results_buf,
5779 results_offset), "%s %-8s: ",
5780 s_mcp_trace_level_str[format_level],
5781 meta.modules[format_module]);
5782 results_offset +=
5783 sprintf(qed_get_buf_ptr(results_buf,
5784 results_offset),
5785 format_ptr->format_str, params[0], params[1],
5786 params[2]);
5787 }
5788
5789 free_mem:
5790 *parsed_results_bytes = results_offset + 1;
5791 qed_mcp_trace_free_meta(p_hwfn, &meta);
5792 return status;
5793 }
5794
5795 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
5796 u32 *dump_buf,
5797 u32 num_dumped_dwords,
5798 u32 *results_buf_size)
5799 {
5800 return qed_parse_mcp_trace_dump(p_hwfn,
5801 dump_buf,
5802 num_dumped_dwords,
5803 NULL, results_buf_size);
5804 }
5805
5806 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
5807 u32 *dump_buf,
5808 u32 num_dumped_dwords,
5809 char *results_buf)
5810 {
5811 u32 parsed_buf_size;
5812
5813 return qed_parse_mcp_trace_dump(p_hwfn,
5814 dump_buf,
5815 num_dumped_dwords,
5816 results_buf, &parsed_buf_size);
5817 }
5818
5819 /* Parses a Reg FIFO dump buffer.
5820 * If result_buf is not NULL, the Reg FIFO results are printed to it.
5821 * In any case, the required results buffer size is assigned to
5822 * parsed_results_bytes.
5823 * The parsing status is returned.
5824 */
5825 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5826 u32 *dump_buf,
5827 u32 num_dumped_dwords,
5828 char *results_buf,
5829 u32 *parsed_results_bytes)
5830 {
5831 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
5832 const char *section_name, *param_name, *param_str_val;
5833 struct reg_fifo_element *elements;
5834 u8 i, j, err_val, vf_val;
5835 char vf_str[4];
5836
5837 /* Read global_params section */
5838 dump_buf += qed_read_section_hdr(dump_buf,
5839 &section_name, &num_section_params);
5840 if (strcmp(section_name, "global_params"))
5841 return DBG_STATUS_REG_FIFO_BAD_DATA;
5842
5843 /* Print global params */
5844 dump_buf += qed_print_section_params(dump_buf,
5845 num_section_params,
5846 results_buf, &results_offset);
5847
5848 /* Read reg_fifo_data section */
5849 dump_buf += qed_read_section_hdr(dump_buf,
5850 &section_name, &num_section_params);
5851 if (strcmp(section_name, "reg_fifo_data"))
5852 return DBG_STATUS_REG_FIFO_BAD_DATA;
5853 dump_buf += qed_read_param(dump_buf,
5854 &param_name, &param_str_val, &param_num_val);
5855 if (strcmp(param_name, "size"))
5856 return DBG_STATUS_REG_FIFO_BAD_DATA;
5857 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
5858 return DBG_STATUS_REG_FIFO_BAD_DATA;
5859 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
5860 elements = (struct reg_fifo_element *)dump_buf;
5861
5862 /* Decode elements */
5863 for (i = 0; i < num_elements; i++) {
5864 bool err_printed = false;
5865
5866 /* Discover if element belongs to a VF or a PF */
5867 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
5868 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
5869 sprintf(vf_str, "%s", "N/A");
5870 else
5871 sprintf(vf_str, "%d", vf_val);
5872
5873 /* Add parsed element to parsed buffer */
5874 results_offset +=
5875 sprintf(qed_get_buf_ptr(results_buf,
5876 results_offset),
5877 "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
5878 elements[i].data,
5879 GET_FIELD(elements[i].data,
5880 REG_FIFO_ELEMENT_ADDRESS) *
5881 REG_FIFO_ELEMENT_ADDR_FACTOR,
5882 s_access_strs[GET_FIELD(elements[i].data,
5883 REG_FIFO_ELEMENT_ACCESS)],
5884 GET_FIELD(elements[i].data,
5885 REG_FIFO_ELEMENT_PF), vf_str,
5886 GET_FIELD(elements[i].data,
5887 REG_FIFO_ELEMENT_PORT),
5888 s_privilege_strs[GET_FIELD(elements[i].
5889 data,
5890 REG_FIFO_ELEMENT_PRIVILEGE)],
5891 s_protection_strs[GET_FIELD(elements[i].data,
5892 REG_FIFO_ELEMENT_PROTECTION)],
5893 s_master_strs[GET_FIELD(elements[i].data,
5894 REG_FIFO_ELEMENT_MASTER)]);
5895
5896 /* Print errors */
5897 for (j = 0,
5898 err_val = GET_FIELD(elements[i].data,
5899 REG_FIFO_ELEMENT_ERROR);
5900 j < ARRAY_SIZE(s_reg_fifo_error_strs);
5901 j++, err_val >>= 1) {
5902 if (!(err_val & 0x1))
5903 continue;
5904 if (err_printed)
5905 results_offset +=
5906 sprintf(qed_get_buf_ptr(results_buf,
5907 results_offset),
5908 ", ");
5909 results_offset +=
5910 sprintf(qed_get_buf_ptr(results_buf,
5911 results_offset), "%s",
5912 s_reg_fifo_error_strs[j]);
5913 err_printed = true;
5914 }
5915
5916 results_offset +=
5917 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5918 }
5919
5920 results_offset += sprintf(qed_get_buf_ptr(results_buf,
5921 results_offset),
5922 "fifo contained %d elements", num_elements);
5923
5924 /* Add 1 for string NULL termination */
5925 *parsed_results_bytes = results_offset + 1;
5926 return DBG_STATUS_OK;
5927 }
5928
5929 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
5930 u32 *dump_buf,
5931 u32 num_dumped_dwords,
5932 u32 *results_buf_size)
5933 {
5934 return qed_parse_reg_fifo_dump(p_hwfn,
5935 dump_buf,
5936 num_dumped_dwords,
5937 NULL, results_buf_size);
5938 }
5939
5940 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
5941 u32 *dump_buf,
5942 u32 num_dumped_dwords,
5943 char *results_buf)
5944 {
5945 u32 parsed_buf_size;
5946
5947 return qed_parse_reg_fifo_dump(p_hwfn,
5948 dump_buf,
5949 num_dumped_dwords,
5950 results_buf, &parsed_buf_size);
5951 }
5952
5953 /* Parses an IGU FIFO dump buffer.
5954 * If result_buf is not NULL, the IGU FIFO results are printed to it.
5955 * In any case, the required results buffer size is assigned to
5956 * parsed_results_bytes.
5957 * The parsing status is returned.
5958 */
5959 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5960 u32 *dump_buf,
5961 u32 num_dumped_dwords,
5962 char *results_buf,
5963 u32 *parsed_results_bytes)
5964 {
5965 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
5966 const char *section_name, *param_name, *param_str_val;
5967 struct igu_fifo_element *elements;
5968 char parsed_addr_data[32];
5969 char parsed_wr_data[256];
5970 u8 i, j;
5971
5972 /* Read global_params section */
5973 dump_buf += qed_read_section_hdr(dump_buf,
5974 &section_name, &num_section_params);
5975 if (strcmp(section_name, "global_params"))
5976 return DBG_STATUS_IGU_FIFO_BAD_DATA;
5977
5978 /* Print global params */
5979 dump_buf += qed_print_section_params(dump_buf,
5980 num_section_params,
5981 results_buf, &results_offset);
5982
5983 /* Read igu_fifo_data section */
5984 dump_buf += qed_read_section_hdr(dump_buf,
5985 &section_name, &num_section_params);
5986 if (strcmp(section_name, "igu_fifo_data"))
5987 return DBG_STATUS_IGU_FIFO_BAD_DATA;
5988 dump_buf += qed_read_param(dump_buf,
5989 &param_name, &param_str_val, &param_num_val);
5990 if (strcmp(param_name, "size"))
5991 return DBG_STATUS_IGU_FIFO_BAD_DATA;
5992 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
5993 return DBG_STATUS_IGU_FIFO_BAD_DATA;
5994 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
5995 elements = (struct igu_fifo_element *)dump_buf;
5996
5997 /* Decode elements */
5998 for (i = 0; i < num_elements; i++) {
5999 /* dword12 (dword index 1 and 2) contains bits 32..95 of the
6000 * FIFO element.
6001 */
6002 u64 dword12 =
6003 ((u64)elements[i].dword2 << 32) | elements[i].dword1;
6004 bool is_wr_cmd = GET_FIELD(dword12,
6005 IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6006 bool is_pf = GET_FIELD(elements[i].dword0,
6007 IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6008 u16 cmd_addr = GET_FIELD(elements[i].dword0,
6009 IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6010 u8 source = GET_FIELD(elements[i].dword0,
6011 IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6012 u8 err_type = GET_FIELD(elements[i].dword0,
6013 IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6014 const struct igu_fifo_addr_data *addr_data = NULL;
6015
6016 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6017 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6018 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6019 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6020
6021 /* Find address data */
6022 for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
6023 j++)
6024 if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
6025 cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
6026 addr_data = &s_igu_fifo_addr_data[j];
6027 if (!addr_data)
6028 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6029
6030 /* Prepare parsed address data */
6031 switch (addr_data->type) {
6032 case IGU_ADDR_TYPE_MSIX_MEM:
6033 sprintf(parsed_addr_data,
6034 " vector_num=0x%x", cmd_addr / 2);
6035 break;
6036 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6037 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6038 sprintf(parsed_addr_data,
6039 " SB=0x%x", cmd_addr - addr_data->start_addr);
6040 break;
6041 default:
6042 parsed_addr_data[0] = '\0';
6043 }
6044
6045 /* Prepare parsed write data */
6046 if (is_wr_cmd) {
6047 u32 wr_data = GET_FIELD(dword12,
6048 IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6049 u32 prod_cons = GET_FIELD(wr_data,
6050 IGU_FIFO_WR_DATA_PROD_CONS);
6051 u8 is_cleanup = GET_FIELD(wr_data,
6052 IGU_FIFO_WR_DATA_CMD_TYPE);
6053
6054 if (source == IGU_SRC_ATTN) {
6055 sprintf(parsed_wr_data,
6056 "prod: 0x%x, ", prod_cons);
6057 } else {
6058 if (is_cleanup) {
6059 u8 cleanup_val = GET_FIELD(wr_data,
6060 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6061 u8 cleanup_type = GET_FIELD(wr_data,
6062 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6063
6064 sprintf(parsed_wr_data,
6065 "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
6066 cleanup_val ? "set" : "clear",
6067 cleanup_type);
6068 } else {
6069 u8 update_flag = GET_FIELD(wr_data,
6070 IGU_FIFO_WR_DATA_UPDATE_FLAG);
6071 u8 en_dis_int_for_sb =
6072 GET_FIELD(wr_data,
6073 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6074 u8 segment = GET_FIELD(wr_data,
6075 IGU_FIFO_WR_DATA_SEGMENT);
6076 u8 timer_mask = GET_FIELD(wr_data,
6077 IGU_FIFO_WR_DATA_TIMER_MASK);
6078
6079 sprintf(parsed_wr_data,
6080 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
6081 prod_cons,
6082 update_flag ? "update" : "nop",
6083 en_dis_int_for_sb
6084 ? (en_dis_int_for_sb ==
6085 1 ? "disable" : "nop") :
6086 "enable",
6087 segment ? "attn" : "regular",
6088 timer_mask);
6089 }
6090 }
6091 } else {
6092 parsed_wr_data[0] = '\0';
6093 }
6094
6095 /* Add parsed element to parsed buffer */
6096 results_offset +=
6097 sprintf(qed_get_buf_ptr(results_buf,
6098 results_offset),
6099 "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
6100 elements[i].dword2, elements[i].dword1,
6101 elements[i].dword0,
6102 is_pf ? "pf" : "vf",
6103 GET_FIELD(elements[i].dword0,
6104 IGU_FIFO_ELEMENT_DWORD0_FID),
6105 s_igu_fifo_source_strs[source],
6106 is_wr_cmd ? "wr" : "rd", cmd_addr,
6107 (!is_pf && addr_data->vf_desc)
6108 ? addr_data->vf_desc : addr_data->desc,
6109 parsed_addr_data, parsed_wr_data,
6110 s_igu_fifo_error_strs[err_type]);
6111 }
6112
6113 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6114 results_offset),
6115 "fifo contained %d elements", num_elements);
6116
6117 /* Add 1 for string NULL termination */
6118 *parsed_results_bytes = results_offset + 1;
6119 return DBG_STATUS_OK;
6120 }
6121
6122 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
6123 u32 *dump_buf,
6124 u32 num_dumped_dwords,
6125 u32 *results_buf_size)
6126 {
6127 return qed_parse_igu_fifo_dump(p_hwfn,
6128 dump_buf,
6129 num_dumped_dwords,
6130 NULL, results_buf_size);
6131 }
6132
6133 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
6134 u32 *dump_buf,
6135 u32 num_dumped_dwords,
6136 char *results_buf)
6137 {
6138 u32 parsed_buf_size;
6139
6140 return qed_parse_igu_fifo_dump(p_hwfn,
6141 dump_buf,
6142 num_dumped_dwords,
6143 results_buf, &parsed_buf_size);
6144 }
6145
6146 static enum dbg_status
6147 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
6148 u32 *dump_buf,
6149 u32 num_dumped_dwords,
6150 char *results_buf,
6151 u32 *parsed_results_bytes)
6152 {
6153 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6154 const char *section_name, *param_name, *param_str_val;
6155 struct protection_override_element *elements;
6156 u8 i;
6157
6158 /* Read global_params section */
6159 dump_buf += qed_read_section_hdr(dump_buf,
6160 &section_name, &num_section_params);
6161 if (strcmp(section_name, "global_params"))
6162 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6163
6164 /* Print global params */
6165 dump_buf += qed_print_section_params(dump_buf,
6166 num_section_params,
6167 results_buf, &results_offset);
6168
6169 /* Read protection_override_data section */
6170 dump_buf += qed_read_section_hdr(dump_buf,
6171 &section_name, &num_section_params);
6172 if (strcmp(section_name, "protection_override_data"))
6173 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6174 dump_buf += qed_read_param(dump_buf,
6175 &param_name, &param_str_val, &param_num_val);
6176 if (strcmp(param_name, "size"))
6177 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6178 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
6179 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6180 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6181 elements = (struct protection_override_element *)dump_buf;
6182
6183 /* Decode elements */
6184 for (i = 0; i < num_elements; i++) {
6185 u32 address = GET_FIELD(elements[i].data,
6186 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6187 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6188
6189 results_offset +=
6190 sprintf(qed_get_buf_ptr(results_buf,
6191 results_offset),
6192 "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
6193 i, address,
6194 GET_FIELD(elements[i].data,
6195 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6196 GET_FIELD(elements[i].data,
6197 PROTECTION_OVERRIDE_ELEMENT_READ),
6198 GET_FIELD(elements[i].data,
6199 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6200 s_protection_strs[GET_FIELD(elements[i].data,
6201 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6202 s_protection_strs[GET_FIELD(elements[i].data,
6203 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6204 }
6205
6206 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6207 results_offset),
6208 "protection override contained %d elements",
6209 num_elements);
6210
6211 /* Add 1 for string NULL termination */
6212 *parsed_results_bytes = results_offset + 1;
6213 return DBG_STATUS_OK;
6214 }
6215
6216 enum dbg_status
6217 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
6218 u32 *dump_buf,
6219 u32 num_dumped_dwords,
6220 u32 *results_buf_size)
6221 {
6222 return qed_parse_protection_override_dump(p_hwfn,
6223 dump_buf,
6224 num_dumped_dwords,
6225 NULL, results_buf_size);
6226 }
6227
6228 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
6229 u32 *dump_buf,
6230 u32 num_dumped_dwords,
6231 char *results_buf)
6232 {
6233 u32 parsed_buf_size;
6234
6235 return qed_parse_protection_override_dump(p_hwfn,
6236 dump_buf,
6237 num_dumped_dwords,
6238 results_buf,
6239 &parsed_buf_size);
6240 }
6241
6242 /* Parses a FW Asserts dump buffer.
6243 * If result_buf is not NULL, the FW Asserts results are printed to it.
6244 * In any case, the required results buffer size is assigned to
6245 * parsed_results_bytes.
6246 * The parsing status is returned.
6247 */
6248 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
6249 u32 *dump_buf,
6250 u32 num_dumped_dwords,
6251 char *results_buf,
6252 u32 *parsed_results_bytes)
6253 {
6254 u32 results_offset = 0, num_section_params, param_num_val, i;
6255 const char *param_name, *param_str_val, *section_name;
6256 bool last_section_found = false;
6257
6258 *parsed_results_bytes = 0;
6259
6260 /* Read global_params section */
6261 dump_buf += qed_read_section_hdr(dump_buf,
6262 &section_name, &num_section_params);
6263 if (strcmp(section_name, "global_params"))
6264 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6265
6266 /* Print global params */
6267 dump_buf += qed_print_section_params(dump_buf,
6268 num_section_params,
6269 results_buf, &results_offset);
6270 while (!last_section_found) {
6271 const char *storm_letter = NULL;
6272 u32 storm_dump_size = 0;
6273
6274 dump_buf += qed_read_section_hdr(dump_buf,
6275 &section_name,
6276 &num_section_params);
6277 if (!strcmp(section_name, "last")) {
6278 last_section_found = true;
6279 continue;
6280 } else if (strcmp(section_name, "fw_asserts")) {
6281 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6282 }
6283
6284 /* Extract params */
6285 for (i = 0; i < num_section_params; i++) {
6286 dump_buf += qed_read_param(dump_buf,
6287 &param_name,
6288 &param_str_val,
6289 &param_num_val);
6290 if (!strcmp(param_name, "storm"))
6291 storm_letter = param_str_val;
6292 else if (!strcmp(param_name, "size"))
6293 storm_dump_size = param_num_val;
6294 else
6295 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6296 }
6297
6298 if (!storm_letter || !storm_dump_size)
6299 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6300
6301 /* Print data */
6302 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6303 results_offset),
6304 "\n%sSTORM_ASSERT: size=%d\n",
6305 storm_letter, storm_dump_size);
6306 for (i = 0; i < storm_dump_size; i++, dump_buf++)
6307 results_offset +=
6308 sprintf(qed_get_buf_ptr(results_buf,
6309 results_offset),
6310 "%08x\n", *dump_buf);
6311 }
6312
6313 /* Add 1 for string NULL termination */
6314 *parsed_results_bytes = results_offset + 1;
6315 return DBG_STATUS_OK;
6316 }
6317
6318 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
6319 u32 *dump_buf,
6320 u32 num_dumped_dwords,
6321 u32 *results_buf_size)
6322 {
6323 return qed_parse_fw_asserts_dump(p_hwfn,
6324 dump_buf,
6325 num_dumped_dwords,
6326 NULL, results_buf_size);
6327 }
6328
6329 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
6330 u32 *dump_buf,
6331 u32 num_dumped_dwords,
6332 char *results_buf)
6333 {
6334 u32 parsed_buf_size;
6335
6336 return qed_parse_fw_asserts_dump(p_hwfn,
6337 dump_buf,
6338 num_dumped_dwords,
6339 results_buf, &parsed_buf_size);
6340 }
6341
6342 /* Wrapper for unifying the idle_chk and mcp_trace api */
6343 static enum dbg_status
6344 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
6345 u32 *dump_buf,
6346 u32 num_dumped_dwords,
6347 char *results_buf)
6348 {
6349 u32 num_errors, num_warnings;
6350
6351 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
6352 results_buf, &num_errors,
6353 &num_warnings);
6354 }
6355
6356 /* Feature meta data lookup table */
6357 static struct {
6358 char *name;
6359 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
6360 struct qed_ptt *p_ptt, u32 *size);
6361 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
6362 struct qed_ptt *p_ptt, u32 *dump_buf,
6363 u32 buf_size, u32 *dumped_dwords);
6364 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
6365 u32 *dump_buf, u32 num_dumped_dwords,
6366 char *results_buf);
6367 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
6368 u32 *dump_buf,
6369 u32 num_dumped_dwords,
6370 u32 *results_buf_size);
6371 } qed_features_lookup[] = {
6372 {
6373 "grc", qed_dbg_grc_get_dump_buf_size,
6374 qed_dbg_grc_dump, NULL, NULL}, {
6375 "idle_chk",
6376 qed_dbg_idle_chk_get_dump_buf_size,
6377 qed_dbg_idle_chk_dump,
6378 qed_print_idle_chk_results_wrapper,
6379 qed_get_idle_chk_results_buf_size}, {
6380 "mcp_trace",
6381 qed_dbg_mcp_trace_get_dump_buf_size,
6382 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
6383 qed_get_mcp_trace_results_buf_size}, {
6384 "reg_fifo",
6385 qed_dbg_reg_fifo_get_dump_buf_size,
6386 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
6387 qed_get_reg_fifo_results_buf_size}, {
6388 "igu_fifo",
6389 qed_dbg_igu_fifo_get_dump_buf_size,
6390 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
6391 qed_get_igu_fifo_results_buf_size}, {
6392 "protection_override",
6393 qed_dbg_protection_override_get_dump_buf_size,
6394 qed_dbg_protection_override_dump,
6395 qed_print_protection_override_results,
6396 qed_get_protection_override_results_buf_size}, {
6397 "fw_asserts",
6398 qed_dbg_fw_asserts_get_dump_buf_size,
6399 qed_dbg_fw_asserts_dump,
6400 qed_print_fw_asserts_results,
6401 qed_get_fw_asserts_results_buf_size},};
6402
6403 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
6404 {
6405 u32 i, precision = 80;
6406
6407 if (!p_text_buf)
6408 return;
6409
6410 pr_notice("\n%.*s", precision, p_text_buf);
6411 for (i = precision; i < text_size; i += precision)
6412 pr_cont("%.*s", precision, p_text_buf + i);
6413 pr_cont("\n");
6414 }
6415
6416 #define QED_RESULTS_BUF_MIN_SIZE 16
6417 /* Generic function for decoding debug feature info */
6418 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
6419 enum qed_dbg_features feature_idx)
6420 {
6421 struct qed_dbg_feature *feature =
6422 &p_hwfn->cdev->dbg_params.features[feature_idx];
6423 u32 text_size_bytes, null_char_pos, i;
6424 enum dbg_status rc;
6425 char *text_buf;
6426
6427 /* Check if feature supports formatting capability */
6428 if (!qed_features_lookup[feature_idx].results_buf_size)
6429 return DBG_STATUS_OK;
6430
6431 /* Obtain size of formatted output */
6432 rc = qed_features_lookup[feature_idx].
6433 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
6434 feature->dumped_dwords, &text_size_bytes);
6435 if (rc != DBG_STATUS_OK)
6436 return rc;
6437
6438 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
6439 null_char_pos = text_size_bytes - 1;
6440 text_size_bytes = (text_size_bytes + 3) & ~0x3;
6441
6442 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
6443 DP_NOTICE(p_hwfn->cdev,
6444 "formatted size of feature was too small %d. Aborting\n",
6445 text_size_bytes);
6446 return DBG_STATUS_INVALID_ARGS;
6447 }
6448
6449 /* Allocate temp text buf */
6450 text_buf = vzalloc(text_size_bytes);
6451 if (!text_buf)
6452 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6453
6454 /* Decode feature opcodes to string on temp buf */
6455 rc = qed_features_lookup[feature_idx].
6456 print_results(p_hwfn, (u32 *)feature->dump_buf,
6457 feature->dumped_dwords, text_buf);
6458 if (rc != DBG_STATUS_OK) {
6459 vfree(text_buf);
6460 return rc;
6461 }
6462
6463 /* Replace the original null character with a '\n' character.
6464 * The bytes that were added as a result of the dword alignment are also
6465 * padded with '\n' characters.
6466 */
6467 for (i = null_char_pos; i < text_size_bytes; i++)
6468 text_buf[i] = '\n';
6469
6470 /* Dump printable feature to log */
6471 if (p_hwfn->cdev->dbg_params.print_data)
6472 qed_dbg_print_feature(text_buf, text_size_bytes);
6473
6474 /* Free the old dump_buf and point the dump_buf to the newly allocated
6475 * and formatted text buffer.
6476 */
6477 vfree(feature->dump_buf);
6478 feature->dump_buf = text_buf;
6479 feature->buf_size = text_size_bytes;
6480 feature->dumped_dwords = text_size_bytes / 4;
6481 return rc;
6482 }
6483
6484 /* Generic function for performing the dump of a debug feature. */
6485 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
6486 struct qed_ptt *p_ptt,
6487 enum qed_dbg_features feature_idx)
6488 {
6489 struct qed_dbg_feature *feature =
6490 &p_hwfn->cdev->dbg_params.features[feature_idx];
6491 u32 buf_size_dwords;
6492 enum dbg_status rc;
6493
6494 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
6495 qed_features_lookup[feature_idx].name);
6496
6497 /* Dump_buf was already allocated; need to free it (this can happen if a dump
6498 * was called but the file was never read).
6499 * We can't use the buffer as is since size may have changed.
6500 */
6501 if (feature->dump_buf) {
6502 vfree(feature->dump_buf);
6503 feature->dump_buf = NULL;
6504 }
6505
6506 /* Get buffer size from hsi, allocate accordingly, and perform the
6507 * dump.
6508 */
6509 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
6510 &buf_size_dwords);
6511 if (rc != DBG_STATUS_OK)
6512 return rc;
6513 feature->buf_size = buf_size_dwords * sizeof(u32);
6514 feature->dump_buf = vmalloc(feature->buf_size);
6515 if (!feature->dump_buf)
6516 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6517
6518 rc = qed_features_lookup[feature_idx].
6519 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
6520 feature->buf_size / sizeof(u32),
6521 &feature->dumped_dwords);
6522
6523 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
6524 * In this case the buffer holds valid binary data, but we won't be able
6525 * to parse it (since parsing relies on data in NVRAM which is only
6526 * accessible when MFW is responsive). Skip the formatting but return
6527 * success so that binary data is provided.
6528 */
6529 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6530 return DBG_STATUS_OK;
6531
6532 if (rc != DBG_STATUS_OK)
6533 return rc;
6534
6535 /* Format output */
6536 rc = format_feature(p_hwfn, feature_idx);
6537 return rc;
6538 }
6539
6540 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6541 {
6542 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
6543 }
6544
6545 int qed_dbg_grc_size(struct qed_dev *cdev)
6546 {
6547 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
6548 }
6549
6550 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6551 {
6552 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
6553 num_dumped_bytes);
6554 }
6555
6556 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
6557 {
6558 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
6559 }
6560
6561 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6562 {
6563 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
6564 num_dumped_bytes);
6565 }
6566
6567 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
6568 {
6569 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
6570 }
6571
6572 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6573 {
6574 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
6575 num_dumped_bytes);
6576 }
6577
6578 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
6579 {
6580 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
6581 }
6582
6583 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
6584 u32 *num_dumped_bytes)
6585 {
6586 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
6587 num_dumped_bytes);
6588 }
6589
6590 int qed_dbg_protection_override_size(struct qed_dev *cdev)
6591 {
6592 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
6593 }
6594
6595 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
6596 u32 *num_dumped_bytes)
6597 {
6598 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
6599 num_dumped_bytes);
6600 }
6601
6602 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
6603 {
6604 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
6605 }
6606
6607 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
6608 u32 *num_dumped_bytes)
6609 {
6610 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
6611 num_dumped_bytes);
6612 }
6613
6614 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
6615 {
6616 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
6617 }
6618
6619 /* Defines the number of bytes allocated for recording the length of a debugfs
6620 * feature buffer.
6621 */
6622 #define REGDUMP_HEADER_SIZE sizeof(u32)
6623 #define REGDUMP_HEADER_FEATURE_SHIFT 24
6624 #define REGDUMP_HEADER_ENGINE_SHIFT 31
6625 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
6626 enum debug_print_features {
6627 OLD_MODE = 0,
6628 IDLE_CHK = 1,
6629 GRC_DUMP = 2,
6630 MCP_TRACE = 3,
6631 REG_FIFO = 4,
6632 PROTECTION_OVERRIDE = 5,
6633 IGU_FIFO = 6,
6634 PHY = 7,
6635 FW_ASSERTS = 8,
6636 };
6637
6638 static u32 qed_calc_regdump_header(enum debug_print_features feature,
6639 int engine, u32 feature_size, u8 omit_engine)
6640 {
6641 /* Insert the engine, feature and mode inside the header and combine it
6642 * with feature size.
6643 */
6644 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
6645 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
6646 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
6647 }
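/* Illustrative sketch (helper name and masks are assumptions, not driver
 * code): given the shifts above, a header dword built by
 * qed_calc_regdump_header() carries the feature size in its low 24 bits,
 * the feature id starting at bit 24, the omit-engine flag at bit 30 and the
 * engine at bit 31, so a consumer can decode it like this.
 */
static void __maybe_unused qed_example_decode_regdump_header(u32 hdr)
{
	u32 feature_size = hdr & ((1 << REGDUMP_HEADER_FEATURE_SHIFT) - 1);
	enum debug_print_features feature =
	    (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
	u8 omit_engine = (hdr >> REGDUMP_HEADER_OMIT_ENGINE_SHIFT) & 0x1;
	u8 engine = (hdr >> REGDUMP_HEADER_ENGINE_SHIFT) & 0x1;

	(void)feature_size;
	(void)feature;
	(void)omit_engine;
	(void)engine;
}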
6648
6649 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
6650 {
6651 u8 cur_engine, omit_engine = 0, org_engine;
6652 u32 offset = 0, feature_size;
6653 int rc;
6654
6655 if (cdev->num_hwfns == 1)
6656 omit_engine = 1;
6657
6658 org_engine = qed_get_debug_engine(cdev);
6659 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
6660 /* Collect idle_chks and grcDump for each hw function */
6661 DP_VERBOSE(cdev, QED_MSG_DEBUG,
6662 "obtaining idle_chk and grcdump for current engine\n");
6663 qed_set_debug_engine(cdev, cur_engine);
6664
6665 /* First idle_chk */
6666 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6667 REGDUMP_HEADER_SIZE, &feature_size);
6668 if (!rc) {
6669 *(u32 *)((u8 *)buffer + offset) =
6670 qed_calc_regdump_header(IDLE_CHK, cur_engine,
6671 feature_size, omit_engine);
6672 offset += (feature_size + REGDUMP_HEADER_SIZE);
6673 } else {
6674 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6675 }
6676
6677 /* Second idle_chk */
6678 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6679 REGDUMP_HEADER_SIZE, &feature_size);
6680 if (!rc) {
6681 *(u32 *)((u8 *)buffer + offset) =
6682 qed_calc_regdump_header(IDLE_CHK, cur_engine,
6683 feature_size, omit_engine);
6684 offset += (feature_size + REGDUMP_HEADER_SIZE);
6685 } else {
6686 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6687 }
6688
6689 /* reg_fifo dump */
6690 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
6691 REGDUMP_HEADER_SIZE, &feature_size);
6692 if (!rc) {
6693 *(u32 *)((u8 *)buffer + offset) =
6694 qed_calc_regdump_header(REG_FIFO, cur_engine,
6695 feature_size, omit_engine);
6696 offset += (feature_size + REGDUMP_HEADER_SIZE);
6697 } else {
6698 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
6699 }
6700
6701 /* igu_fifo dump */
6702 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
6703 REGDUMP_HEADER_SIZE, &feature_size);
6704 if (!rc) {
6705 *(u32 *)((u8 *)buffer + offset) =
6706 qed_calc_regdump_header(IGU_FIFO, cur_engine,
6707 feature_size, omit_engine);
6708 offset += (feature_size + REGDUMP_HEADER_SIZE);
6709 } else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* GRC dump - must be last because when the MCP is stuck it
		 * would clutter the idle_chk, reg_fifo, ... dumps.
		 */
		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
		}
	}

	/* mcp_trace */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	qed_set_debug_engine(cdev, org_engine);

	return 0;
}
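
/* A minimal sketch of how a consumer might walk the buffer produced by
 * qed_dbg_all_data(): each chunk starts with a REGDUMP_HEADER_SIZE header
 * encoded by qed_calc_regdump_header(), followed by the feature payload.
 * The helper name and the callback are illustrative assumptions, not part of
 * the driver; the header is decoded with the illustrative parser sketched
 * earlier in this file.
 */
static void qed_example_walk_regdump(const u8 *buf, u32 total_len,
				     void (*cb)(u8 feature, u8 engine,
						const u8 *data, u32 len))
{
	u32 offset = 0;

	while (offset + REGDUMP_HEADER_SIZE <= total_len) {
		u32 hdr = *(const u32 *)(buf + offset);
		u32 len;
		u8 feature, engine, omit_engine;

		qed_example_parse_regdump_header(hdr, &len, &feature, &engine,
						 &omit_engine);
		cb(feature, engine, buf + offset + REGDUMP_HEADER_SIZE, len);
		offset += REGDUMP_HEADER_SIZE + len;
	}
}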

int qed_dbg_all_data_size(struct qed_dev *cdev)
{
	u8 cur_engine, org_engine;
	u32 regs_len = 0;

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
			    REGDUMP_HEADER_SIZE +
			    qed_dbg_protection_override_size(cdev) +
			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
	}

	/* Engine common */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
	qed_set_debug_engine(cdev, org_engine);

	return regs_len;
}
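
/* A minimal usage sketch (illustrative, not part of the driver): size the
 * regdump first, allocate a buffer of that size, then fill it. Error handling
 * beyond the basics is omitted.
 */
static int qed_example_collect_all_data(struct qed_dev *cdev)
{
	int size = qed_dbg_all_data_size(cdev);
	void *buf;
	int rc;

	if (size <= 0)
		return -EINVAL;

	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_all_data(cdev, buf);

	/* ... hand the buffer to user space or a file here ... */

	vfree(buf);
	return rc;
}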

int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_params.features[feature];
	enum dbg_status dbg_rc;
	struct qed_ptt *p_ptt;
	int rc = 0;

	/* Acquire ptt */
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EINVAL;

	/* Get dump */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
		rc = -EINVAL;
		goto out;
	}

	DP_VERBOSE(cdev, QED_MSG_DEBUG,
		   "copying debugfs feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
			    4;

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_params.features[feature];
	u32 buf_size_dwords;
	enum dbg_status rc;

	if (!p_ptt)
		return -EINVAL;

	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
						   &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		buf_size_dwords = 0;

	qed_ptt_release(p_hwfn, p_ptt);
	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
	return qed_feature->buf_size;
}
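
/* A minimal per-feature usage sketch (illustrative, not part of the driver),
 * using the MCP trace wrappers defined earlier in this file. The same pattern
 * applies to the other per-feature size/dump pairs.
 */
static int qed_example_collect_mcp_trace(struct qed_dev *cdev)
{
	u32 num_dumped_bytes = 0;
	int size = qed_dbg_mcp_trace_size(cdev);
	void *buf;
	int rc;

	if (size <= 0)
		return -EINVAL;

	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_mcp_trace(cdev, buf, &num_dumped_bytes);

	/* ... consume num_dumped_bytes of data from buf here ... */

	vfree(buf);
	return rc;
}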

u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->dbg_params.engine_for_debug;
}

void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
		   engine_number);
	cdev->dbg_params.engine_for_debug = engine_number;
}

void qed_dbg_pf_init(struct qed_dev *cdev)
{
	const u8 *dbg_values;

	/* Debug values are located after the init values.
	 * The offset to them is stored in the first dword of the file.
	 */
	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
	qed_dbg_set_bin_ptr((u8 *)dbg_values);
	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
}

void qed_dbg_pf_exit(struct qed_dev *cdev)
{
	struct qed_dbg_feature *feature = NULL;
	enum qed_dbg_features feature_idx;

	/* A debug feature's buffer may still be allocated if the feature was
	 * used but its dump was never read, so free any remaining buffers.
	 */
	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
		feature = &cdev->dbg_params.features[feature_idx];
		if (feature->dump_buf) {
			vfree(feature->dump_buf);
			feature->dump_buf = NULL;
		}
	}
}