1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3
4 #include <linux/device.h>
5
6 #include "hclge_debugfs.h"
7 #include "hclge_err.h"
8 #include "hclge_main.h"
9 #include "hclge_tm.h"
10 #include "hnae3.h"
11
12 static const char * const state_str[] = { "off", "on" };
13 static const char * const hclge_mac_state_str[] = {
14 "TO_ADD", "TO_DEL", "ACTIVE"
15 };
16
17 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
18 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
19 .dfx_msg = &hclge_dbg_bios_common_reg[0],
20 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
21 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
22 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
23 { .cmd = HNAE3_DBG_CMD_REG_SSU,
24 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
25 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
26 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
27 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
28 { .cmd = HNAE3_DBG_CMD_REG_SSU,
29 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
30 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
31 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
32 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
33 { .cmd = HNAE3_DBG_CMD_REG_SSU,
34 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
35 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
36 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
37 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
38 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
39 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
40 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
41 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
42 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
43 { .cmd = HNAE3_DBG_CMD_REG_RPU,
44 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
45 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
46 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
47 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
48 { .cmd = HNAE3_DBG_CMD_REG_RPU,
49 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
50 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
51 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
52 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
53 { .cmd = HNAE3_DBG_CMD_REG_NCSI,
54 .dfx_msg = &hclge_dbg_ncsi_reg[0],
55 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
56 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
57 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
58 { .cmd = HNAE3_DBG_CMD_REG_RTC,
59 .dfx_msg = &hclge_dbg_rtc_reg[0],
60 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
61 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
62 .cmd = HCLGE_OPC_DFX_RTC_REG } },
63 { .cmd = HNAE3_DBG_CMD_REG_PPP,
64 .dfx_msg = &hclge_dbg_ppp_reg[0],
65 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
66 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
67 .cmd = HCLGE_OPC_DFX_PPP_REG } },
68 { .cmd = HNAE3_DBG_CMD_REG_RCB,
69 .dfx_msg = &hclge_dbg_rcb_reg[0],
70 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
71 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
72 .cmd = HCLGE_OPC_DFX_RCB_REG } },
73 { .cmd = HNAE3_DBG_CMD_REG_TQP,
74 .dfx_msg = &hclge_dbg_tqp_reg[0],
75 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
76 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
77 .cmd = HCLGE_OPC_DFX_TQP_REG } },
78 };
79
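/* Build one space-padded row of a debugfs table: copy either the column
 * headers (items[].name) or the per-column result strings into 'content',
 * stepping each column by the header length plus its interval, and
 * terminate the row with a newline and a NUL.
 */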
80 static void hclge_dbg_fill_content(char *content, u16 len,
81 const struct hclge_dbg_item *items,
82 const char **result, u16 size)
83 {
84 char *pos = content;
85 u16 i;
86
87 memset(content, ' ', len);
88 for (i = 0; i < size; i++) {
89 if (result)
90 strncpy(pos, result[i], strlen(result[i]));
91 else
92 strncpy(pos, items[i].name, strlen(items[i].name));
93 pos += strlen(items[i].name) + items[i].interval;
94 }
95 *pos++ = '\n';
96 *pos++ = '\0';
97 }
98
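/* Format a function id: id 0 is the PF, any other id prints as "vf<id - 1>". */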
99 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
100 {
101 if (id)
102 sprintf(buf, "vf%u", id - 1);
103 else
104 sprintf(buf, "pf");
105
106 return buf;
107 }
108
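/* Query how many buffer descriptors are needed to read the DFX register
 * group selected by 'offset'; a zero count is treated as an error.
 */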
109 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
110 u32 *bd_num)
111 {
112 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
113 int entries_per_desc;
114 int index;
115 int ret;
116
117 ret = hclge_query_bd_num_cmd_send(hdev, desc);
118 if (ret) {
119 dev_err(&hdev->pdev->dev,
120 "failed to get dfx bd_num, offset = %d, ret = %d\n",
121 offset, ret);
122 return ret;
123 }
124
125 entries_per_desc = ARRAY_SIZE(desc[0].data);
126 index = offset % entries_per_desc;
127
128 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
129 if (!(*bd_num)) {
130 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
131 return -EINVAL;
132 }
133
134 return 0;
135 }
136
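/* Set up 'bd_num' chained descriptors for the given opcode (all but the
 * last linked with the NEXT flag), put the query index in the first
 * descriptor and send them through the command queue.
 */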
137 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
138 struct hclge_desc *desc_src,
139 int index, int bd_num,
140 enum hclge_opcode_type cmd)
141 {
142 struct hclge_desc *desc = desc_src;
143 int ret, i;
144
145 hclge_cmd_setup_basic_desc(desc, cmd, true);
146 desc->data[0] = cpu_to_le32(index);
147
148 for (i = 1; i < bd_num; i++) {
149 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
150 desc++;
151 hclge_cmd_setup_basic_desc(desc, cmd, true);
152 }
153
154 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
155 if (ret)
156 dev_err(&hdev->pdev->dev,
157 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
158 return ret;
159 }
160
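/* Dump the per-TQP DFX registers: print the item legend first, then one
 * row of register values for each TQP allocated to vport 0.
 */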
161 static int
162 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
163 const struct hclge_dbg_reg_type_info *reg_info,
164 char *buf, int len, int *pos)
165 {
166 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
167 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
168 struct hclge_desc *desc_src;
169 u32 index, entry, i, cnt;
170 int bd_num, min_num, ret;
171 struct hclge_desc *desc;
172
173 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
174 if (ret)
175 return ret;
176
177 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
178 if (!desc_src)
179 return -ENOMEM;
180
181 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
182
183 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
184 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
185 cnt++, dfx_message->message);
186
187 for (i = 0; i < cnt; i++)
188 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
189
190 *pos += scnprintf(buf + *pos, len - *pos, "\n");
191
192 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
193 dfx_message = reg_info->dfx_msg;
194 desc = desc_src;
195 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
196 reg_msg->cmd);
197 if (ret)
198 break;
199
200 for (i = 0; i < min_num; i++, dfx_message++) {
201 entry = i % HCLGE_DESC_DATA_LEN;
202 if (i > 0 && !entry)
203 desc++;
204
205 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
206 le32_to_cpu(desc->data[entry]));
207 }
208 *pos += scnprintf(buf + *pos, len - *pos, "\n");
209 }
210
211 kfree(desc_src);
212 return ret;
213 }
214
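/* Dump a common DFX register group: read all descriptors in one query and
 * print each flagged item as "name: value".
 */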
215 static int
216 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
217 const struct hclge_dbg_reg_type_info *reg_info,
218 char *buf, int len, int *pos)
219 {
220 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
221 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
222 struct hclge_desc *desc_src;
223 int bd_num, min_num, ret;
224 struct hclge_desc *desc;
225 u32 entry, i;
226
227 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
228 if (ret)
229 return ret;
230
231 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
232 if (!desc_src)
233 return -ENOMEM;
234
235 desc = desc_src;
236
237 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
238 if (ret) {
239 kfree(desc_src);
240 return ret;
241 }
242
243 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
244
245 for (i = 0; i < min_num; i++, dfx_message++) {
246 entry = i % HCLGE_DESC_DATA_LEN;
247 if (i > 0 && !entry)
248 desc++;
249 if (!dfx_message->flag)
250 continue;
251
252 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
253 dfx_message->message,
254 le32_to_cpu(desc->data[entry]));
255 }
256
257 kfree(desc_src);
258 return 0;
259 }
260
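/* Read HCLGE_OPC_CONFIG_MAC_MODE and print each MAC enable/loopback bit
 * of the txrx_pad_fcs_loop_en field.
 */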
261 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
262 int len, int *pos)
263 {
264 struct hclge_config_mac_mode_cmd *req;
265 struct hclge_desc desc;
266 u32 loop_en;
267 int ret;
268
269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
270
271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
272 if (ret) {
273 dev_err(&hdev->pdev->dev,
274 "failed to dump mac enable status, ret = %d\n", ret);
275 return ret;
276 }
277
278 req = (struct hclge_config_mac_mode_cmd *)desc.data;
279 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
280
281 *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
282 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
283 *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
284 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
285 *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
286 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
287 *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
288 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
289 *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
290 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
291 *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
292 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
293 *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
294 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
295 *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
296 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
297 *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
298 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
299 *pos += scnprintf(buf + *pos, len - *pos,
300 "mac_rx_oversize_truncate_en: %#x\n",
301 hnae3_get_bit(loop_en,
302 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
303 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
304 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
305 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
306 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
307 *pos += scnprintf(buf + *pos, len - *pos,
308 "mac_tx_under_min_err_en: %#x\n",
309 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
310 *pos += scnprintf(buf + *pos, len - *pos,
311 "mac_tx_oversize_truncate_en: %#x\n",
312 hnae3_get_bit(loop_en,
313 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
314
315 return 0;
316 }
317
318 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
319 int len, int *pos)
320 {
321 struct hclge_config_max_frm_size_cmd *req;
322 struct hclge_desc desc;
323 int ret;
324
325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
326
327 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
328 if (ret) {
329 dev_err(&hdev->pdev->dev,
330 "failed to dump mac frame size, ret = %d\n", ret);
331 return ret;
332 }
333
334 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
335
336 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
337 le16_to_cpu(req->max_frm_size));
338 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
339 req->min_frm_size);
340
341 return 0;
342 }
343
344 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
345 int len, int *pos)
346 {
347 #define HCLGE_MAC_SPEED_SHIFT 0
348 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
349 #define HCLGE_MAC_DUPLEX_SHIFT 7
350
351 struct hclge_config_mac_speed_dup_cmd *req;
352 struct hclge_desc desc;
353 int ret;
354
355 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
356
357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
358 if (ret) {
359 dev_err(&hdev->pdev->dev,
360 "failed to dump mac speed duplex, ret = %d\n", ret);
361 return ret;
362 }
363
364 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
365
366 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
367 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
368 HCLGE_MAC_SPEED_SHIFT));
369 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
370 hnae3_get_bit(req->speed_dup,
371 HCLGE_MAC_DUPLEX_SHIFT));
372 return 0;
373 }
374
375 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
376 {
377 int pos = 0;
378 int ret;
379
380 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
381 if (ret)
382 return ret;
383
384 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
385 if (ret)
386 return ret;
387
388 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
389 }
390
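/* Print the DFX status bits (masks, shaping pass, back-pressure) for
 * every queue set.
 */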
391 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
392 int *pos)
393 {
394 struct hclge_dbg_bitmap_cmd req;
395 struct hclge_desc desc;
396 u16 qset_id, qset_num;
397 int ret;
398
399 ret = hclge_tm_get_qset_num(hdev, &qset_num);
400 if (ret)
401 return ret;
402
403 *pos += scnprintf(buf + *pos, len - *pos,
404 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
405 for (qset_id = 0; qset_id < qset_num; qset_id++) {
406 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
407 HCLGE_OPC_QSET_DFX_STS);
408 if (ret)
409 return ret;
410
411 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
412
413 *pos += scnprintf(buf + *pos, len - *pos,
414 "%04u %#x %#x %#x %#x\n",
415 qset_id, req.bit0, req.bit1, req.bit2,
416 req.bit3);
417 }
418
419 return 0;
420 }
421
422 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
423 int *pos)
424 {
425 struct hclge_dbg_bitmap_cmd req;
426 struct hclge_desc desc;
427 u8 pri_id, pri_num;
428 int ret;
429
430 ret = hclge_tm_get_pri_num(hdev, &pri_num);
431 if (ret)
432 return ret;
433
434 *pos += scnprintf(buf + *pos, len - *pos,
435 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
436 for (pri_id = 0; pri_id < pri_num; pri_id++) {
437 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
438 HCLGE_OPC_PRI_DFX_STS);
439 if (ret)
440 return ret;
441
442 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
443
444 *pos += scnprintf(buf + *pos, len - *pos,
445 "%03u %#x %#x %#x\n",
446 pri_id, req.bit0, req.bit1, req.bit2);
447 }
448
449 return 0;
450 }
451
452 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
453 int *pos)
454 {
455 struct hclge_dbg_bitmap_cmd req;
456 struct hclge_desc desc;
457 u8 pg_id;
458 int ret;
459
460 *pos += scnprintf(buf + *pos, len - *pos,
461 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
462 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
463 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
464 HCLGE_OPC_PG_DFX_STS);
465 if (ret)
466 return ret;
467
468 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
469
470 *pos += scnprintf(buf + *pos, len - *pos,
471 "%03u %#x %#x %#x\n",
472 pg_id, req.bit0, req.bit1, req.bit2);
473 }
474
475 return 0;
476 }
477
478 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
479 int *pos)
480 {
481 struct hclge_desc desc;
482 u16 nq_id;
483 int ret;
484
485 *pos += scnprintf(buf + *pos, len - *pos,
486 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
487 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
488 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
489 HCLGE_OPC_SCH_NQ_CNT);
490 if (ret)
491 return ret;
492
493 *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
494 nq_id, le32_to_cpu(desc.data[1]));
495
496 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
497 HCLGE_OPC_SCH_RQ_CNT);
498 if (ret)
499 return ret;
500
501 *pos += scnprintf(buf + *pos, len - *pos,
502 " %#x\n",
503 le32_to_cpu(desc.data[1]));
504 }
505
506 return 0;
507 }
508
509 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
510 int *pos)
511 {
512 struct hclge_dbg_bitmap_cmd req;
513 struct hclge_desc desc;
514 u8 port_id = 0;
515 int ret;
516
517 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
518 HCLGE_OPC_PORT_DFX_STS);
519 if (ret)
520 return ret;
521
522 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
523
524 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
525 req.bit0);
526 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
527 req.bit1);
528
529 return 0;
530 }
531
532 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
533 int *pos)
534 {
535 struct hclge_desc desc[2];
536 u8 port_id = 0;
537 int ret;
538
539 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
540 HCLGE_OPC_TM_INTERNAL_CNT);
541 if (ret)
542 return ret;
543
544 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
545 le32_to_cpu(desc[0].data[1]));
546 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
547 le32_to_cpu(desc[0].data[2]));
548
549 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
550 HCLGE_OPC_TM_INTERNAL_STS);
551 if (ret)
552 return ret;
553
554 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
555 le32_to_cpu(desc[0].data[1]));
556 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
557 le32_to_cpu(desc[0].data[2]));
558 *pos += scnprintf(buf + *pos, len - *pos,
559 "sch_roce_fifo_afull_gap: %#x\n",
560 le32_to_cpu(desc[0].data[3]));
561 *pos += scnprintf(buf + *pos, len - *pos,
562 "tx_private_waterline: %#x\n",
563 le32_to_cpu(desc[0].data[4]));
564 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
565 le32_to_cpu(desc[0].data[5]));
566 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
567 le32_to_cpu(desc[1].data[0]));
568 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
569 le32_to_cpu(desc[1].data[1]));
570
571 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
572 return 0;
573
574 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
575 HCLGE_OPC_TM_INTERNAL_STS_1);
576 if (ret)
577 return ret;
578
579 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
580 le32_to_cpu(desc[0].data[1]));
581 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
582 le32_to_cpu(desc[0].data[2]));
583 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
584 le32_to_cpu(desc[0].data[3]));
585 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
586 le32_to_cpu(desc[0].data[4]));
587 *pos += scnprintf(buf + *pos, len - *pos,
588 "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
589 le32_to_cpu(desc[0].data[5]));
590
591 return 0;
592 }
593
594 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
595 {
596 int pos = 0;
597 int ret;
598
599 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
600 if (ret)
601 return ret;
602
603 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
604 if (ret)
605 return ret;
606
607 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
608 if (ret)
609 return ret;
610
611 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
612 if (ret)
613 return ret;
614
615 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
616 if (ret)
617 return ret;
618
619 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
620 }
621
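/* Look up the register-dump descriptor matching the debugfs command and
 * dispatch to the TQP or common dump helper.
 */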
622 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
623 enum hnae3_dbg_cmd cmd, char *buf, int len)
624 {
625 const struct hclge_dbg_reg_type_info *reg_info;
626 int pos = 0, ret = 0;
627 int i;
628
629 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
630 reg_info = &hclge_dbg_reg_info[i];
631 if (cmd == reg_info->cmd) {
632 if (cmd == HNAE3_DBG_CMD_REG_TQP)
633 return hclge_dbg_dump_reg_tqp(hdev, reg_info,
634 buf, len, &pos);
635
636 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
637 len, &pos);
638 if (ret)
639 break;
640 }
641 }
642
643 return ret;
644 }
645
646 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
647 {
648 struct hclge_ets_tc_weight_cmd *ets_weight;
649 struct hclge_desc desc;
650 char *sch_mode_str;
651 int pos = 0;
652 int ret;
653 u8 i;
654
655 if (!hnae3_dev_dcb_supported(hdev)) {
656 dev_err(&hdev->pdev->dev,
657 "Only DCB-supported dev supports tc\n");
658 return -EOPNOTSUPP;
659 }
660
661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
662 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
663 if (ret) {
664 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
665 ret);
666 return ret;
667 }
668
669 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
670
671 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
672 hdev->tm_info.num_tc);
673 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
674 ets_weight->weight_offset);
675
676 pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
677 for (i = 0; i < HNAE3_MAX_TC; i++) {
678 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
679 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
680 i, sch_mode_str, ets_weight->tc_weight[i]);
681 }
682
683 return 0;
684 }
685
686 static const struct hclge_dbg_item tm_pg_items[] = {
687 { "ID", 2 },
688 { "PRI_MAP", 2 },
689 { "MODE", 2 },
690 { "DWRR", 2 },
691 { "C_IR_B", 2 },
692 { "C_IR_U", 2 },
693 { "C_IR_S", 2 },
694 { "C_BS_B", 2 },
695 { "C_BS_S", 2 },
696 { "C_FLAG", 2 },
697 { "C_RATE(Mbps)", 2 },
698 { "P_IR_B", 2 },
699 { "P_IR_U", 2 },
700 { "P_IR_S", 2 },
701 { "P_BS_B", 2 },
702 { "P_BS_S", 2 },
703 { "P_FLAG", 2 },
704 { "P_RATE(Mbps)", 0 }
705 };
706
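/* Format the shaper parameters (ir_b/ir_u/ir_s, bs_b/bs_s, flag, rate)
 * into consecutive result columns, advancing *index past them.
 */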
707 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
708 char **result, u8 *index)
709 {
710 sprintf(result[(*index)++], "%3u", para->ir_b);
711 sprintf(result[(*index)++], "%3u", para->ir_u);
712 sprintf(result[(*index)++], "%3u", para->ir_s);
713 sprintf(result[(*index)++], "%3u", para->bs_b);
714 sprintf(result[(*index)++], "%3u", para->bs_s);
715 sprintf(result[(*index)++], "%3u", para->flag);
716 sprintf(result[(*index)++], "%6u", para->rate);
717 }
718
719 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
720 char *buf, int len)
721 {
722 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
723 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
724 u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
725 char content[HCLGE_DBG_TM_INFO_LEN];
726 int pos = 0;
727 int ret;
728
729 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
730 result[i] = data_str;
731 data_str += HCLGE_DBG_DATA_STR_LEN;
732 }
733
734 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
735 NULL, ARRAY_SIZE(tm_pg_items));
736 pos += scnprintf(buf + pos, len - pos, "%s", content);
737
738 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
739 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
740 if (ret)
741 return ret;
742
743 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
744 if (ret)
745 return ret;
746
747 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
748 if (ret)
749 return ret;
750
751 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
752 HCLGE_OPC_TM_PG_C_SHAPPING,
753 &c_shaper_para);
754 if (ret)
755 return ret;
756
757 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
758 HCLGE_OPC_TM_PG_P_SHAPPING,
759 &p_shaper_para);
760 if (ret)
761 return ret;
762
763 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
764 "sp";
765
766 j = 0;
767 sprintf(result[j++], "%02u", pg_id);
768 sprintf(result[j++], "0x%02x", pri_bit_map);
769 sprintf(result[j++], "%4s", sch_mode_str);
770 sprintf(result[j++], "%3u", weight);
771 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
772 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
773
774 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
775 (const char **)result,
776 ARRAY_SIZE(tm_pg_items));
777 pos += scnprintf(buf + pos, len - pos, "%s", content);
778 }
779
780 return 0;
781 }
782
783 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
784 {
785 char *data_str;
786 int ret;
787
788 data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
789 HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
790
791 if (!data_str)
792 return -ENOMEM;
793
794 ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
795
796 kfree(data_str);
797
798 return ret;
799 }
800
801 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
802 {
803 struct hclge_tm_shaper_para shaper_para;
804 int pos = 0;
805 int ret;
806
807 ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
808 if (ret)
809 return ret;
810
811 pos += scnprintf(buf + pos, len - pos,
812 "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
813 pos += scnprintf(buf + pos, len - pos,
814 "%3u %3u %3u %3u %3u %1u %6u\n",
815 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
816 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
817 shaper_para.rate);
818
819 return 0;
820 }
821
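/* Read the back-pressure to qset bitmap of one TC group by group and
 * print it as rows of eight 32-bit words; returns the number of
 * characters written.
 */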
822 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
823 char *buf, int len)
824 {
825 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
826 struct hclge_bp_to_qs_map_cmd *map;
827 struct hclge_desc desc;
828 int pos = 0;
829 u8 group_id;
830 u8 grp_num;
831 u16 i = 0;
832 int ret;
833
834 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
835 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
836 map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
837 for (group_id = 0; group_id < grp_num; group_id++) {
838 hclge_cmd_setup_basic_desc(&desc,
839 HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
840 true);
841 map->tc_id = tc_id;
842 map->qs_group_id = group_id;
843 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
844 if (ret) {
845 dev_err(&hdev->pdev->dev,
846 "failed to get bp to qset map, ret = %d\n",
847 ret);
848 return ret;
849 }
850
851 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
852 }
853
854 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
855 for (group_id = 0; group_id < grp_num / 8; group_id++) {
856 pos += scnprintf(buf + pos, len - pos,
857 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
858 group_id * 256, qset_mapping[i + 7],
859 qset_mapping[i + 6], qset_mapping[i + 5],
860 qset_mapping[i + 4], qset_mapping[i + 3],
861 qset_mapping[i + 2], qset_mapping[i + 1],
862 qset_mapping[i]);
863 i += 8;
864 }
865
866 return pos;
867 }
868
869 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
870 {
871 u16 queue_id;
872 u16 qset_id;
873 u8 link_vld;
874 int pos = 0;
875 u8 pri_id;
876 u8 tc_id;
877 int ret;
878
879 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
880 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
881 if (ret)
882 return ret;
883
884 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
885 &link_vld);
886 if (ret)
887 return ret;
888
889 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
890 if (ret)
891 return ret;
892
893 pos += scnprintf(buf + pos, len - pos,
894 "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
895 pos += scnprintf(buf + pos, len - pos,
896 "%04u %4u %3u %2u\n",
897 queue_id, qset_id, pri_id, tc_id);
898
899 if (!hnae3_dev_dcb_supported(hdev))
900 continue;
901
902 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
903 len - pos);
904 if (ret < 0)
905 return ret;
906 pos += ret;
907
908 pos += scnprintf(buf + pos, len - pos, "\n");
909 }
910
911 return 0;
912 }
913
914 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
915 {
916 struct hclge_tm_nodes_cmd *nodes;
917 struct hclge_desc desc;
918 int pos = 0;
919 int ret;
920
921 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
922 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
923 if (ret) {
924 dev_err(&hdev->pdev->dev,
925 "failed to dump tm nodes, ret = %d\n", ret);
926 return ret;
927 }
928
929 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
930
931 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
932 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
933 nodes->pg_base_id, nodes->pg_num);
934 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
935 nodes->pri_base_id, nodes->pri_num);
936 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
937 le16_to_cpu(nodes->qset_base_id),
938 le16_to_cpu(nodes->qset_num));
939 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
940 le16_to_cpu(nodes->queue_base_id),
941 le16_to_cpu(nodes->queue_num));
942
943 return 0;
944 }
945
946 static const struct hclge_dbg_item tm_pri_items[] = {
947 { "ID", 4 },
948 { "MODE", 2 },
949 { "DWRR", 2 },
950 { "C_IR_B", 2 },
951 { "C_IR_U", 2 },
952 { "C_IR_S", 2 },
953 { "C_BS_B", 2 },
954 { "C_BS_S", 2 },
955 { "C_FLAG", 2 },
956 { "C_RATE(Mbps)", 2 },
957 { "P_IR_B", 2 },
958 { "P_IR_U", 2 },
959 { "P_IR_S", 2 },
960 { "P_BS_B", 2 },
961 { "P_BS_S", 2 },
962 { "P_FLAG", 2 },
963 { "P_RATE(Mbps)", 0 }
964 };
965
966 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
967 {
968 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
969 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
970 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
971 char content[HCLGE_DBG_TM_INFO_LEN];
972 u8 pri_num, sch_mode, weight, i, j;
973 int pos, ret;
974
975 ret = hclge_tm_get_pri_num(hdev, &pri_num);
976 if (ret)
977 return ret;
978
979 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
980 result[i] = &data_str[i][0];
981
982 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
983 NULL, ARRAY_SIZE(tm_pri_items));
984 pos = scnprintf(buf, len, "%s", content);
985
986 for (i = 0; i < pri_num; i++) {
987 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
988 if (ret)
989 return ret;
990
991 ret = hclge_tm_get_pri_weight(hdev, i, &weight);
992 if (ret)
993 return ret;
994
995 ret = hclge_tm_get_pri_shaper(hdev, i,
996 HCLGE_OPC_TM_PRI_C_SHAPPING,
997 &c_shaper_para);
998 if (ret)
999 return ret;
1000
1001 ret = hclge_tm_get_pri_shaper(hdev, i,
1002 HCLGE_OPC_TM_PRI_P_SHAPPING,
1003 &p_shaper_para);
1004 if (ret)
1005 return ret;
1006
1007 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1008 "sp";
1009
1010 j = 0;
1011 sprintf(result[j++], "%04u", i);
1012 sprintf(result[j++], "%4s", sch_mode_str);
1013 sprintf(result[j++], "%3u", weight);
1014 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1015 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1016 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1017 (const char **)result,
1018 ARRAY_SIZE(tm_pri_items));
1019 pos += scnprintf(buf + pos, len - pos, "%s", content);
1020 }
1021
1022 return 0;
1023 }
1024
1025 static const struct hclge_dbg_item tm_qset_items[] = {
1026 { "ID", 4 },
1027 { "MAP_PRI", 2 },
1028 { "LINK_VLD", 2 },
1029 { "MODE", 2 },
1030 { "DWRR", 2 },
1031 { "IR_B", 2 },
1032 { "IR_U", 2 },
1033 { "IR_S", 2 },
1034 { "BS_B", 2 },
1035 { "BS_S", 2 },
1036 { "FLAG", 2 },
1037 { "RATE(Mbps)", 0 }
1038 };
1039
1040 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1041 {
1042 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1043 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1044 u8 priority, link_vld, sch_mode, weight;
1045 struct hclge_tm_shaper_para shaper_para;
1046 char content[HCLGE_DBG_TM_INFO_LEN];
1047 u16 qset_num, i;
1048 int ret, pos;
1049 u8 j;
1050
1051 ret = hclge_tm_get_qset_num(hdev, &qset_num);
1052 if (ret)
1053 return ret;
1054
1055 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1056 result[i] = &data_str[i][0];
1057
1058 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1059 NULL, ARRAY_SIZE(tm_qset_items));
1060 pos = scnprintf(buf, len, "%s", content);
1061
1062 for (i = 0; i < qset_num; i++) {
1063 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1064 if (ret)
1065 return ret;
1066
1067 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1068 if (ret)
1069 return ret;
1070
1071 ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1072 if (ret)
1073 return ret;
1074
1075 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1076 if (ret)
1077 return ret;
1078
1079 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1080 "sp";
1081
1082 j = 0;
1083 sprintf(result[j++], "%04u", i);
1084 sprintf(result[j++], "%4u", priority);
1085 sprintf(result[j++], "%4u", link_vld);
1086 sprintf(result[j++], "%4s", sch_mode_str);
1087 sprintf(result[j++], "%3u", weight);
1088 hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1089
1090 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1091 (const char **)result,
1092 ARRAY_SIZE(tm_qset_items));
1093 pos += scnprintf(buf + pos, len - pos, "%s", content);
1094 }
1095
1096 return 0;
1097 }
1098
1099 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1100 int len)
1101 {
1102 struct hclge_cfg_pause_param_cmd *pause_param;
1103 struct hclge_desc desc;
1104 int pos = 0;
1105 int ret;
1106
1107 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1108 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1109 if (ret) {
1110 dev_err(&hdev->pdev->dev,
1111 "failed to dump qos pause, ret = %d\n", ret);
1112 return ret;
1113 }
1114
1115 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1116
1117 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1118 pause_param->pause_trans_gap);
1119 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1120 le16_to_cpu(pause_param->pause_trans_time));
1121 return 0;
1122 }
1123
1124 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1125 int len)
1126 {
1127 #define HCLGE_DBG_TC_MASK 0x0F
1128 #define HCLGE_DBG_TC_BIT_WIDTH 4
1129
1130 struct hclge_qos_pri_map_cmd *pri_map;
1131 struct hclge_desc desc;
1132 int pos = 0;
1133 u8 *pri_tc;
1134 u8 tc, i;
1135 int ret;
1136
1137 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1138 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1139 if (ret) {
1140 dev_err(&hdev->pdev->dev,
1141 "failed to dump qos pri map, ret = %d\n", ret);
1142 return ret;
1143 }
1144
1145 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1146
1147 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1148 pri_map->vlan_pri);
1149 pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
1150
1151 pri_tc = (u8 *)pri_map;
1152 for (i = 0; i < HNAE3_MAX_TC; i++) {
1153 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1154 tc &= HCLGE_DBG_TC_MASK;
1155 pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
1156 }
1157
1158 return 0;
1159 }
1160
1161 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1162 {
1163 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1164 struct hclge_desc desc;
1165 int pos = 0;
1166 int i, ret;
1167
1168 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1169 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1170 if (ret) {
1171 dev_err(&hdev->pdev->dev,
1172 "failed to dump tx buf, ret = %d\n", ret);
1173 return ret;
1174 }
1175
1176 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1177 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1178 pos += scnprintf(buf + pos, len - pos,
1179 "tx_packet_buf_tc_%d: 0x%x\n", i,
1180 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1181
1182 return pos;
1183 }
1184
1185 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1186 int len)
1187 {
1188 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1189 struct hclge_desc desc;
1190 int pos = 0;
1191 int i, ret;
1192
1193 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1194 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1195 if (ret) {
1196 dev_err(&hdev->pdev->dev,
1197 "failed to dump rx priv buf, ret = %d\n", ret);
1198 return ret;
1199 }
1200
1201 pos += scnprintf(buf + pos, len - pos, "\n");
1202
1203 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1204 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1205 pos += scnprintf(buf + pos, len - pos,
1206 "rx_packet_buf_tc_%d: 0x%x\n", i,
1207 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1208
1209 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1210 le16_to_cpu(rx_buf_cmd->shared_buf));
1211
1212 return pos;
1213 }
1214
1215 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1216 int len)
1217 {
1218 struct hclge_rx_com_wl *rx_com_wl;
1219 struct hclge_desc desc;
1220 int pos = 0;
1221 int ret;
1222
1223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1224 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1225 if (ret) {
1226 dev_err(&hdev->pdev->dev,
1227 "failed to dump rx common wl, ret = %d\n", ret);
1228 return ret;
1229 }
1230
1231 rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1232 pos += scnprintf(buf + pos, len - pos, "\n");
1233 pos += scnprintf(buf + pos, len - pos,
1234 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1235 le16_to_cpu(rx_com_wl->com_wl.high),
1236 le16_to_cpu(rx_com_wl->com_wl.low));
1237
1238 return pos;
1239 }
1240
1241 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1242 int len)
1243 {
1244 struct hclge_rx_com_wl *rx_packet_cnt;
1245 struct hclge_desc desc;
1246 int pos = 0;
1247 int ret;
1248
1249 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1250 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1251 if (ret) {
1252 dev_err(&hdev->pdev->dev,
1253 "failed to dump rx global pkt cnt, ret = %d\n", ret);
1254 return ret;
1255 }
1256
1257 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1258 pos += scnprintf(buf + pos, len - pos,
1259 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1260 le16_to_cpu(rx_packet_cnt->com_wl.high),
1261 le16_to_cpu(rx_packet_cnt->com_wl.low));
1262
1263 return pos;
1264 }
1265
1266 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1267 int len)
1268 {
1269 struct hclge_rx_priv_wl_buf *rx_priv_wl;
1270 struct hclge_desc desc[2];
1271 int pos = 0;
1272 int i, ret;
1273
1274 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1275 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1276 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1277 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1278 if (ret) {
1279 dev_err(&hdev->pdev->dev,
1280 "failed to dump rx priv wl buf, ret = %d\n", ret);
1281 return ret;
1282 }
1283
1284 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1285 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1286 pos += scnprintf(buf + pos, len - pos,
1287 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1288 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1289 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1290
1291 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1292 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1293 pos += scnprintf(buf + pos, len - pos,
1294 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1295 i + HCLGE_TC_NUM_ONE_DESC,
1296 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1297 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1298
1299 return pos;
1300 }
1301
1302 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1303 char *buf, int len)
1304 {
1305 struct hclge_rx_com_thrd *rx_com_thrd;
1306 struct hclge_desc desc[2];
1307 int pos = 0;
1308 int i, ret;
1309
1310 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1311 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1312 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1313 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1314 if (ret) {
1315 dev_err(&hdev->pdev->dev,
1316 "failed to dump rx common threshold, ret = %d\n", ret);
1317 return ret;
1318 }
1319
1320 pos += scnprintf(buf + pos, len - pos, "\n");
1321 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1322 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1323 pos += scnprintf(buf + pos, len - pos,
1324 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1325 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1326 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1327
1328 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1329 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1330 pos += scnprintf(buf + pos, len - pos,
1331 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1332 i + HCLGE_TC_NUM_ONE_DESC,
1333 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1334 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1335
1336 return pos;
1337 }
1338
1339 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1340 int len)
1341 {
1342 int pos = 0;
1343 int ret;
1344
1345 ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1346 if (ret < 0)
1347 return ret;
1348 pos += ret;
1349
1350 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1351 if (ret < 0)
1352 return ret;
1353 pos += ret;
1354
1355 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1356 if (ret < 0)
1357 return ret;
1358 pos += ret;
1359
1360 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1361 if (ret < 0)
1362 return ret;
1363 pos += ret;
1364
1365 pos += scnprintf(buf + pos, len - pos, "\n");
1366 if (!hnae3_dev_dcb_supported(hdev))
1367 return 0;
1368
1369 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1370 if (ret < 0)
1371 return ret;
1372 pos += ret;
1373
1374 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1375 len - pos);
1376 if (ret < 0)
1377 return ret;
1378
1379 return 0;
1380 }
1381
1382 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1383 {
1384 struct hclge_mac_ethertype_idx_rd_cmd *req0;
1385 struct hclge_desc desc;
1386 u32 msg_egress_port;
1387 int pos = 0;
1388 int ret, i;
1389
1390 pos += scnprintf(buf + pos, len - pos,
1391 "entry mac_addr mask ether ");
1392 pos += scnprintf(buf + pos, len - pos,
1393 "mask vlan mask i_map i_dir e_type ");
1394 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
1395
1396 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1397 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1398 true);
1399 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1400 req0->index = cpu_to_le16(i);
1401
1402 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1403 if (ret) {
1404 dev_err(&hdev->pdev->dev,
1405 "failed to dump manage table, ret = %d\n", ret);
1406 return ret;
1407 }
1408
1409 if (!req0->resp_code)
1410 continue;
1411
1412 pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
1413 le16_to_cpu(req0->index), req0->mac_addr);
1414
1415 pos += scnprintf(buf + pos, len - pos,
1416 "%x %04x %x %04x ",
1417 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1418 le16_to_cpu(req0->ethter_type),
1419 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1420 le16_to_cpu(req0->vlan_tag) &
1421 HCLGE_DBG_MNG_VLAN_TAG);
1422
1423 pos += scnprintf(buf + pos, len - pos,
1424 "%x %02x %02x ",
1425 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1426 req0->i_port_bitmap, req0->i_port_direction);
1427
1428 msg_egress_port = le16_to_cpu(req0->egress_port);
1429 pos += scnprintf(buf + pos, len - pos,
1430 "%x %x %02x %04x %x\n",
1431 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1432 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1433 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1434 le16_to_cpu(req0->egress_queue),
1435 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1436 }
1437
1438 return 0;
1439 }
1440
1441 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1442
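/* Read one flow director TCAM entry (the X or Y key, selected by sel_x)
 * at tcam_msg.loc and format the raw key words into tcam_buf.
 */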
1443 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1444 char *tcam_buf,
1445 struct hclge_dbg_tcam_msg tcam_msg)
1446 {
1447 struct hclge_fd_tcam_config_1_cmd *req1;
1448 struct hclge_fd_tcam_config_2_cmd *req2;
1449 struct hclge_fd_tcam_config_3_cmd *req3;
1450 struct hclge_desc desc[3];
1451 int pos = 0;
1452 int ret, i;
1453 __le32 *req;
1454
1455 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1456 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1457 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1458 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1459 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1460
1461 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1462 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1463 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1464
1465 req1->stage = tcam_msg.stage;
1466 req1->xy_sel = sel_x ? 1 : 0;
1467 req1->index = cpu_to_le32(tcam_msg.loc);
1468
1469 ret = hclge_cmd_send(&hdev->hw, desc, 3);
1470 if (ret)
1471 return ret;
1472
1473 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1474 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1475 tcam_msg.loc);
1476
1477 /* tcam_data0 ~ tcam_data1 */
1478 req = (__le32 *)req1->tcam_data;
1479 for (i = 0; i < 2; i++)
1480 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1481 "%08x\n", le32_to_cpu(*req++));
1482
1483 /* tcam_data2 ~ tcam_data7 */
1484 req = (__le32 *)req2->tcam_data;
1485 for (i = 0; i < 6; i++)
1486 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1487 "%08x\n", le32_to_cpu(*req++));
1488
1489 /* tcam_data8 ~ tcam_data12 */
1490 req = (__le32 *)req3->tcam_data;
1491 for (i = 0; i < 5; i++)
1492 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1493 "%08x\n", le32_to_cpu(*req++));
1494
1495 return ret;
1496 }
1497
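/* Collect the location of every flow director rule into rule_locs under
 * fd_rule_lock; fail if the count is zero or does not match
 * hclge_fd_rule_num.
 */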
1498 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1499 {
1500 struct hclge_fd_rule *rule;
1501 struct hlist_node *node;
1502 int cnt = 0;
1503
1504 spin_lock_bh(&hdev->fd_rule_lock);
1505 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1506 rule_locs[cnt] = rule->location;
1507 cnt++;
1508 }
1509 spin_unlock_bh(&hdev->fd_rule_lock);
1510
1511 if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1512 return -EINVAL;
1513
1514 return cnt;
1515 }
1516
1517 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1518 {
1519 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1520 struct hclge_dbg_tcam_msg tcam_msg;
1521 int i, ret, rule_cnt;
1522 u16 *rule_locs;
1523 char *tcam_buf;
1524 int pos = 0;
1525
1526 if (!hnae3_dev_fd_supported(hdev)) {
1527 dev_err(&hdev->pdev->dev,
1528 "Only FD-supported dev supports dump fd tcam\n");
1529 return -EOPNOTSUPP;
1530 }
1531
1532 if (!hdev->hclge_fd_rule_num || !rule_num)
1533 return 0;
1534
1535 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1536 if (!rule_locs)
1537 return -ENOMEM;
1538
1539 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1540 if (!tcam_buf) {
1541 kfree(rule_locs);
1542 return -ENOMEM;
1543 }
1544
1545 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1546 if (rule_cnt < 0) {
1547 ret = rule_cnt;
1548 dev_err(&hdev->pdev->dev,
1549 "failed to get rule number, ret = %d\n", ret);
1550 goto out;
1551 }
1552
1553 ret = 0;
1554 for (i = 0; i < rule_cnt; i++) {
1555 tcam_msg.stage = HCLGE_FD_STAGE_1;
1556 tcam_msg.loc = rule_locs[i];
1557
1558 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1559 if (ret) {
1560 dev_err(&hdev->pdev->dev,
1561 "failed to get fd tcam key x, ret = %d\n", ret);
1562 goto out;
1563 }
1564
1565 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1566
1567 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1568 if (ret) {
1569 dev_err(&hdev->pdev->dev,
1570 "failed to get fd tcam key y, ret = %d\n", ret);
1571 goto out;
1572 }
1573
1574 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1575 }
1576
1577 out:
1578 kfree(tcam_buf);
1579 kfree(rule_locs);
1580 return ret;
1581 }
1582
1583 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1584 {
1585 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1586 struct hclge_fd_ad_cnt_read_cmd *req;
1587 char str_id[HCLGE_DBG_ID_LEN];
1588 struct hclge_desc desc;
1589 int pos = 0;
1590 int ret;
1591 u64 cnt;
1592 u8 i;
1593
1594 pos += scnprintf(buf + pos, len - pos,
1595 "func_id\thit_times\n");
1596
1597 for (i = 0; i < func_num; i++) {
1598 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1599 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1600 req->index = cpu_to_le16(i);
1601 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1602 if (ret) {
1603 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1604 ret);
1605 return ret;
1606 }
1607 cnt = le64_to_cpu(req->cnt);
1608 hclge_dbg_get_func_id_str(str_id, i);
1609 pos += scnprintf(buf + pos, len - pos,
1610 "%s\t%llu\n", str_id, cnt);
1611 }
1612
1613 return 0;
1614 }
1615
1616 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1617 {
1618 int pos = 0;
1619
1620 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1621 hdev->rst_stats.pf_rst_cnt);
1622 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1623 hdev->rst_stats.flr_rst_cnt);
1624 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1625 hdev->rst_stats.global_rst_cnt);
1626 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1627 hdev->rst_stats.imp_rst_cnt);
1628 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1629 hdev->rst_stats.reset_done_cnt);
1630 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1631 hdev->rst_stats.hw_reset_done_cnt);
1632 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1633 hdev->rst_stats.reset_cnt);
1634 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1635 hdev->rst_stats.reset_fail_cnt);
1636 pos += scnprintf(buf + pos, len - pos,
1637 "vector0 interrupt enable status: 0x%x\n",
1638 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
1639 pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
1640 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
1641 pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
1642 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
1643 pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
1644 hclge_read_dev(&hdev->hw,
1645 HCLGE_RAS_PF_OTHER_INT_STS_REG));
1646 pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
1647 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
1648 pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
1649 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
1650 pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
1651 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
1652 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1653 hdev->state);
1654
1655 return 0;
1656 }
1657
1658 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1659 {
1660 unsigned long rem_nsec;
1661 int pos = 0;
1662 u64 lc;
1663
1664 lc = local_clock();
1665 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1666
1667 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1668 (unsigned long)lc, rem_nsec / 1000);
1669 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1670 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1671 pos += scnprintf(buf + pos, len - pos,
1672 "last_service_task_processed: %lu(jiffies)\n",
1673 hdev->last_serv_processed);
1674 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1675 hdev->serv_processed_cnt);
1676
1677 return 0;
1678 }
1679
1680 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1681 {
1682 int pos = 0;
1683
1684 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1685 hdev->num_nic_msi);
1686 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1687 hdev->num_roce_msi);
1688 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1689 hdev->num_msi_used);
1690 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1691 hdev->num_msi_left);
1692
1693 return 0;
1694 }
1695
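/* Print the IMP statistics descriptors as an "offset | data" table,
 * two 32-bit words per line.
 */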
1696 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1697 char *buf, int len, u32 bd_num)
1698 {
1699 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1700
1701 struct hclge_desc *desc_index = desc_src;
1702 u32 offset = 0;
1703 int pos = 0;
1704 u32 i, j;
1705
1706 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1707
1708 for (i = 0; i < bd_num; i++) {
1709 j = 0;
1710 while (j < HCLGE_DESC_DATA_LEN - 1) {
1711 pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1712 offset);
1713 pos += scnprintf(buf + pos, len - pos, "0x%08x ",
1714 le32_to_cpu(desc_index->data[j++]));
1715 pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1716 le32_to_cpu(desc_index->data[j++]));
1717 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1718 }
1719 desc_index++;
1720 }
1721 }
1722
1723 static int
1724 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1725 {
1726 struct hclge_get_imp_bd_cmd *req;
1727 struct hclge_desc *desc_src;
1728 struct hclge_desc desc;
1729 u32 bd_num;
1730 int ret;
1731
1732 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1733
1734 req = (struct hclge_get_imp_bd_cmd *)desc.data;
1735 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1736 if (ret) {
1737 dev_err(&hdev->pdev->dev,
1738 "failed to get imp statistics bd number, ret = %d\n",
1739 ret);
1740 return ret;
1741 }
1742
1743 bd_num = le32_to_cpu(req->bd_num);
1744 if (!bd_num) {
1745 dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1746 return -EINVAL;
1747 }
1748
1749 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1750 if (!desc_src)
1751 return -ENOMEM;
1752
1753 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1754 HCLGE_OPC_IMP_STATS_INFO);
1755 if (ret) {
1756 kfree(desc_src);
1757 dev_err(&hdev->pdev->dev,
1758 "failed to get imp statistics, ret = %d\n", ret);
1759 return ret;
1760 }
1761
1762 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1763
1764 kfree(desc_src);
1765
1766 return 0;
1767 }
1768
1769 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
1770 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
1771
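/* Print NCL config words from the descriptors as "offset | data" lines,
 * skipping the first data word (the request header) and decrementing
 * *index until the requested length is consumed.
 */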
1772 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1773 char *buf, int *len, int *pos)
1774 {
1775 #define HCLGE_CMD_DATA_NUM 6
1776
1777 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1778 int i, j;
1779
1780 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1781 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1782 if (i == 0 && j == 0)
1783 continue;
1784
1785 *pos += scnprintf(buf + *pos, *len - *pos,
1786 "0x%04x | 0x%08x\n", offset,
1787 le32_to_cpu(desc[i].data[j]));
1788
1789 offset += sizeof(u32);
1790 *index -= sizeof(u32);
1791
1792 if (*index <= 0)
1793 return;
1794 }
1795 }
1796 }
1797
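/* hclge_dbg_dump_ncl_config - walk the 16K NCL configuration space in chunks
 * of HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes; each query encodes the
 * current offset in the low 16 bits of data0 and the read length in the
 * high 16 bits.
 */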
static int
hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
{
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
	int pos = 0;
	u32 data0;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "offset | data\n");

	while (index > 0) {
		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= (u32)index << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return ret;

		hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
	}

	return 0;
}

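/* hclge_dbg_dump_loopback - report the on/off state of each loopback mode:
 * app (MAC) loopback, serdes serial/parallel loopback, and phy loopback
 * (taken from the attached phydev, or from the common loopback command when
 * the PHY is driven by the IMP).
 */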
static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_common_lb_cmd *req_common;
	struct hclge_desc desc;
	u8 loopback_en;
	int pos = 0;
	int ret;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_common = (struct hclge_common_lb_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
			 hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);
		return ret;
	}

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
			 state_str[loopback_en]);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump common loopback status, ret = %d\n",
			ret);
		return ret;
	}

	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
			 state_str[loopback_en]);

	loopback_en = req_common->enable &
		      HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
			 state_str[loopback_en]);

	if (phydev) {
		loopback_en = phydev->loopback_enabled;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
				 state_str[loopback_en]);
	}

	return 0;
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 * @buf: output buffer
 * @len: length of the output buffer
 */
static int
hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;
	int pos = 0;

	pos += scnprintf(buf + pos, len - pos,
			 "Recently generated mac tnl interruption:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);

		pos += scnprintf(buf + pos, len - pos,
				 "[%07lu.%03lu] status = 0x%x\n",
				 (unsigned long)stats.time, rem_nsec / 1000,
				 stats.status);
	}

	return 0;
}

static const struct hclge_dbg_item mac_list_items[] = {
	{ "FUNC_ID", 2 },
	{ "MAC_ADDR", 12 },
	{ "STATE", 2 },
};

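/* hclge_dbg_dump_mac_list - print the software unicast or multicast MAC list
 * of every vport, one row per entry: owning function, MAC address and entry
 * state (TO_ADD/TO_DEL/ACTIVE). Each vport's list is walked under its
 * mac_list_lock.
 */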
static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
				    bool is_unicast)
{
	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	char *result[ARRAY_SIZE(mac_list_items)];
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;
	u32 func_id;
	int pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
		result[i] = &data_str[i][0];

	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
			 is_unicast ? "UC" : "MC");
	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
			       NULL, ARRAY_SIZE(mac_list_items));
	pos += scnprintf(buf + pos, len - pos, "%s", content);

	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
		vport = &hdev->vport[func_id];
		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
		spin_lock_bh(&vport->mac_list_lock);
		list_for_each_entry_safe(mac_node, tmp, list, node) {
			i = 0;
			result[i++] = hclge_dbg_get_func_id_str(str_id,
								func_id);
			sprintf(result[i++], "%pM", mac_node->mac_addr);
			sprintf(result[i++], "%5s",
				hclge_mac_state_str[mac_node->state]);
			hclge_dbg_fill_content(content, sizeof(content),
					       mac_list_items,
					       (const char **)result,
					       ARRAY_SIZE(mac_list_items));
			pos += scnprintf(buf + pos, len - pos, "%s", content);
		}
		spin_unlock_bh(&vport->mac_list_lock);
	}
}

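/* hclge_dbg_dump_umv_info - dump the UMV (unicast MAC) table usage: the
 * maximum and wanted sizes, the private quota per function, the shared pool
 * size, and how many entries each function currently uses.
 */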
static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
{
	u8 func_num = pci_num_vf(hdev->pdev) + 1;
	struct hclge_vport *vport;
	int pos = 0;
	u8 i;

	pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
			 hdev->num_alloc_vport);
	pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
			 hdev->max_umv_size);
	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
			 hdev->wanted_umv_size);
	pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
			 hdev->priv_umv_size);

	mutex_lock(&hdev->vport_lock);
	pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
			 hdev->share_umv_size);
	for (i = 0; i < func_num; i++) {
		vport = &hdev->vport[i];
		pos += scnprintf(buf + pos, len - pos,
				 "vport(%u) used_umv_num : %u\n",
				 i, vport->used_umv_num);
	}
	mutex_unlock(&hdev->vport_lock);

	return 0;
}

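/* hclge_get_vlan_rx_offload_cfg - read the Rx VLAN offload configuration of
 * one function and decode the strip/drop/priority-only bits for tag1 and
 * tag2 into @vlan_cfg.
 */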
static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 rx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u rxvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	rx_cfg = req->vport_vlan_cfg;
	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);

	return 0;
}

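/* hclge_get_vlan_tx_offload_cfg - read the Tx VLAN offload configuration of
 * one function: the port VLAN id (pvid), the accept/insert bits for tag1 and
 * tag2, and the tag shift mode.
 */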
static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
					 struct hclge_dbg_vlan_cfg *vlan_cfg)
{
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_desc desc;
	u16 bmap_index;
	u8 tx_cfg;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u txvlan cfg, ret = %d\n",
			vf_id, ret);
		return ret;
	}

	tx_cfg = req->vport_vlan_cfg;
	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);

	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);

	return 0;
}

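/* hclge_get_vlan_filter_config_cmd - send a VLAN filter control query for the
 * given filter type and function; the response is left in @desc for the
 * caller to decode.
 */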
static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
					    u8 vlan_type, u8 vf_id,
					    struct hclge_desc *desc)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	int ret;

	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);

	return ret;
}

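/* hclge_get_vlan_filter_state - return the VLAN filter enable bitmap
 * (vlan_fe) for one filter type and function.
 */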
static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
				       u8 vf_id, u8 *vlan_fe)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
	if (ret)
		return ret;

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	*vlan_fe = req->vlan_fe;

	return 0;
}

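/* hclge_get_port_vlan_filter_bypass_state - query whether ingress port VLAN
 * filter bypass is enabled for one function; returns 0 without touching
 * @bypass_en when the device does not support the port VLAN bypass
 * capability.
 */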
static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
						   u8 vf_id, u8 *bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);

	return 0;
}

static const struct hclge_dbg_item vlan_filter_items[] = {
	{ "FUNC_ID", 2 },
	{ "I_VF_VLAN_FILTER", 2 },
	{ "E_VF_VLAN_FILTER", 2 },
	{ "PORT_VLAN_FILTER_BYPASS", 0 }
};

static const struct hclge_dbg_item vlan_offload_items[] = {
	{ "FUNC_ID", 2 },
	{ "PVID", 4 },
	{ "ACCEPT_TAG1", 2 },
	{ "ACCEPT_TAG2", 2 },
	{ "ACCEPT_UNTAG1", 2 },
	{ "ACCEPT_UNTAG2", 2 },
	{ "INSERT_TAG1", 2 },
	{ "INSERT_TAG2", 2 },
	{ "SHIFT_TAG", 2 },
	{ "STRIP_TAG1", 2 },
	{ "STRIP_TAG2", 2 },
	{ "DROP_TAG1", 2 },
	{ "DROP_TAG2", 2 },
	{ "PRI_ONLY_TAG1", 2 },
	{ "PRI_ONLY_TAG2", 0 }
};

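/* hclge_dbg_dump_vlan_filter_config - print the port-level ingress/egress
 * VLAN filter state, then one row per function with its VF-level filter
 * state and the port VLAN filter bypass state ("NA" when bypass is
 * unsupported).
 */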
static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
					     int len, int *pos)
{
	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_filter_items)];
	u8 i, j, vlan_fe, bypass, ingress, egress;
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	int ret;

	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
					  &vlan_fe);
	if (ret)
		return ret;
	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;

	*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
			  state_str[ingress]);
	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
			  state_str[egress]);

	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
			       NULL, ARRAY_SIZE(vlan_filter_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
						  &vlan_fe);
		if (ret)
			return ret;

		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
		if (ret)
			return ret;
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = state_str[ingress];
		result[j++] = state_str[egress];
		result[j++] =
			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_filter_items, result,
				       ARRAY_SIZE(vlan_filter_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}
	*pos += scnprintf(buf + *pos, len - *pos, "\n");

	return 0;
}

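/* hclge_dbg_dump_vlan_offload_config - print one row per function describing
 * its Tx/Rx VLAN offload settings: pvid, the accept/insert/strip/drop and
 * priority-only bits for tag1 and tag2, and the tag shift mode.
 */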
static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
					      int len, int *pos)
{
	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
	const char *result[ARRAY_SIZE(vlan_offload_items)];
	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
	struct hclge_dbg_vlan_cfg vlan_cfg;
	int ret;
	u8 i, j;

	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
			       NULL, ARRAY_SIZE(vlan_offload_items));
	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);

	for (i = 0; i < func_num; i++) {
		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
		if (ret)
			return ret;

		sprintf(str_pvid, "%u", vlan_cfg.pvid);
		j = 0;
		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
		result[j++] = str_pvid;
		result[j++] = state_str[vlan_cfg.accept_tag1];
		result[j++] = state_str[vlan_cfg.accept_tag2];
		result[j++] = state_str[vlan_cfg.accept_untag1];
		result[j++] = state_str[vlan_cfg.accept_untag2];
		result[j++] = state_str[vlan_cfg.insert_tag1];
		result[j++] = state_str[vlan_cfg.insert_tag2];
		result[j++] = state_str[vlan_cfg.shift_tag];
		result[j++] = state_str[vlan_cfg.strip_tag1];
		result[j++] = state_str[vlan_cfg.strip_tag2];
		result[j++] = state_str[vlan_cfg.drop_tag1];
		result[j++] = state_str[vlan_cfg.drop_tag2];
		result[j++] = state_str[vlan_cfg.pri_only1];
		result[j++] = state_str[vlan_cfg.pri_only2];

		hclge_dbg_fill_content(content, sizeof(content),
				       vlan_offload_items, result,
				       ARRAY_SIZE(vlan_offload_items));
		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
	}

	return 0;
}

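/* hclge_dbg_dump_vlan_config - entry for the VLAN config dump: the filter
 * section and the offload section share one output buffer, chained through
 * the running position @pos.
 */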
static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
	if (ret)
		return ret;

	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
}

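/* hclge_dbg_dump_ptp_info - dump PTP (hardware timestamping) state: enable
 * flags, last Rx/Tx timestamp activity, Tx skip/timeout counters, the
 * software vs. hardware configuration words and the current tx_type and
 * rx_filter settings.
 */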
static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
{
	struct hclge_ptp *ptp = hdev->ptp;
	u32 sw_cfg = ptp->ptp_cfg;
	unsigned int tx_start;
	unsigned int last_rx;
	int pos = 0;
	u32 hw_cfg;
	int ret;

	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
			 ptp->info.name);
	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
			 "yes" : "no");
	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
			 "yes" : "no");

	last_rx = jiffies_to_msecs(ptp->last_rx);
	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);

	tx_start = jiffies_to_msecs(ptp->tx_start);
	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
			 ptp->tx_skipped);
	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
			 ptp->tx_timeout);
	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
			 ptp->last_tx_seqid);

	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
	if (ret)
		return ret;

	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
			 sw_cfg, hw_cfg);

	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);

	return 0;
}

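/* Thin wrappers so the UC and MC MAC lists can be registered as separate
 * debugfs commands while sharing hclge_dbg_dump_mac_list().
 */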
static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, true);

	return 0;
}

static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
{
	hclge_dbg_dump_mac_list(hdev, buf, len, false);

	return 0;
}

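/* Dispatch table mapping each hnae3 debugfs command to its dump handler.
 * Entries with .dbg_dump produce their output directly; the register dump
 * commands use .dbg_dump_reg, which also takes the command id so it can pick
 * the right register set.
 */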
static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
	{
		.cmd = HNAE3_DBG_CMD_TM_NODES,
		.dbg_dump = hclge_dbg_dump_tm_nodes,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PRI,
		.dbg_dump = hclge_dbg_dump_tm_pri,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_QSET,
		.dbg_dump = hclge_dbg_dump_tm_qset,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_MAP,
		.dbg_dump = hclge_dbg_dump_tm_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PG,
		.dbg_dump = hclge_dbg_dump_tm_pg,
	},
	{
		.cmd = HNAE3_DBG_CMD_TM_PORT,
		.dbg_dump = hclge_dbg_dump_tm_port,
	},
	{
		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
		.dbg_dump = hclge_dbg_dump_tc,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
		.dbg_dump = hclge_dbg_dump_qos_pri_map,
	},
	{
		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_UC,
		.dbg_dump = hclge_dbg_dump_mac_uc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_MC,
		.dbg_dump = hclge_dbg_dump_mac_mc,
	},
	{
		.cmd = HNAE3_DBG_CMD_MNG_TBL,
		.dbg_dump = hclge_dbg_dump_mng_table,
	},
	{
		.cmd = HNAE3_DBG_CMD_LOOPBACK,
		.dbg_dump = hclge_dbg_dump_loopback,
	},
	{
		.cmd = HNAE3_DBG_CMD_PTP_INFO,
		.dbg_dump = hclge_dbg_dump_ptp_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
		.dbg_dump = hclge_dbg_dump_interrupt,
	},
	{
		.cmd = HNAE3_DBG_CMD_RESET_INFO,
		.dbg_dump = hclge_dbg_dump_rst_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_IMP_INFO,
		.dbg_dump = hclge_dbg_get_imp_stats_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
		.dbg_dump = hclge_dbg_dump_ncl_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_SSU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RPU,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_NCSI,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RTC,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_PPP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_RCB,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_TQP,
		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_MAC,
		.dbg_dump = hclge_dbg_dump_mac,
	},
	{
		.cmd = HNAE3_DBG_CMD_REG_DCB,
		.dbg_dump = hclge_dbg_dump_dcb,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_TCAM,
		.dbg_dump = hclge_dbg_dump_fd_tcam,
	},
	{
		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
	},
	{
		.cmd = HNAE3_DBG_CMD_SERV_INFO,
		.dbg_dump = hclge_dbg_dump_serv_info,
	},
	{
		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
		.dbg_dump = hclge_dbg_dump_vlan_config,
	},
	{
		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
		.dbg_dump = hclge_dbg_dump_fd_counter,
	},
	{
		.cmd = HNAE3_DBG_CMD_UMV_INFO,
		.dbg_dump = hclge_dbg_dump_umv_info,
	},
};

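/* hclge_dbg_read_cmd - entry point called from the hnae3 debugfs layer when a
 * debugfs file is read: look up the handler registered for @cmd in
 * hclge_dbg_cmd_func[] and let it fill @buf with at most @len bytes.
 *
 * A minimal sketch of a caller, assuming a hypothetical handle and buffer
 * size (the real caller lives in the hns3 debugfs code):
 *
 *	char *buf = kvzalloc(buf_len, GFP_KERNEL);
 *
 *	if (buf)
 *		ret = hclge_dbg_read_cmd(handle, HNAE3_DBG_CMD_INTERRUPT_INFO,
 *					 buf, buf_len);
 */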
int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
		       char *buf, int len)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	const struct hclge_dbg_func *cmd_func;
	struct hclge_dev *hdev = vport->back;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
		if (cmd == hclge_dbg_cmd_func[i].cmd) {
			cmd_func = &hclge_dbg_cmd_func[i];
			if (cmd_func->dbg_dump)
				return cmd_func->dbg_dump(hdev, buf, len);
			else
				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
							      len);
		}
	}

	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
	return -EINVAL;
}