// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

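/* Look-up table for the "dump reg" command: each entry ties a reg_type
 * keyword to its DFX message array, to the offset used to query how many
 * buffer descriptors the dump needs, and to the firmware query opcode.
 */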
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .reg_type = "bios common",
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .reg_type = "igu egu",
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .reg_type = "ncsi",
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .reg_type = "rtc",
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .reg_type = "ppp",
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .reg_type = "rcb",
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .reg_type = "tqp",
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};

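/* Ask the firmware how many buffer descriptors (BDs) a DFX register dump
 * needs. The BD counts of all register sets come back packed into the
 * query descriptors; @offset selects the wanted entry.
 */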
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc;
	int index;
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get dfx bdnum fail, ret = %d\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;
	return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}

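/* Build a chain of @bd_num read descriptors for @cmd, linked through the
 * NEXT flag, put @index into the first data word and send the query.
 */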
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}

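/* Handler for "dump reg <reg_type> [index]": query the register set
 * described by @reg_info and print every entry whose DFX message has its
 * flag set.
 */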
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      const struct hclge_dbg_reg_type_info *reg_info,
				      const char *cmd_buf)
{
#define IDX_OFFSET	1

	const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int entries_per_desc;
	int bd_num, buf_len;
	u32 index = 0;
	int min_num;
	int ret, i;

	if (*s) {
		ret = kstrtouint(s, 0, &index);
		index = (ret != 0) ? 0 : index;
	}

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
	if (bd_num <= 0) {
		dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
			reg_msg->offset, bd_num);
		return;
	}

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return;

	desc = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
	if (ret) {
		kfree(desc_src);
		return;
	}

	entries_per_desc = ARRAY_SIZE(desc->data);
	min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);

	desc = desc_src;
	for (i = 0; i < min_num; i++) {
		if (i > 0 && (i % entries_per_desc) == 0)
			desc++;
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 le32_to_cpu(desc->data[i % entries_per_desc]));

		dfx_message++;
	}

	kfree(desc_src);
}

static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac enable status, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_mac_mode_cmd *)desc.data;
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);

	dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
	dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
	dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
	dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
	dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
	dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
	dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
}

static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac frame size, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
		 le16_to_cpu(req->max_frm_size));
	dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
}

static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
{
#define HCLGE_MAC_SPEED_SHIFT	0
#define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
#define HCLGE_MAC_DUPLEX_SHIFT	7

	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump mac speed duplex, ret = %d\n", ret);
		return;
	}

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "speed: %#lx\n",
		 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
				 HCLGE_MAC_SPEED_SHIFT));
	dev_info(&hdev->pdev->dev, "duplex: %#x\n",
		 hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
}

static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
{
	hclge_dbg_dump_mac_enable_status(hdev);

	hclge_dbg_dump_mac_frame_size(hdev);

	hclge_dbg_dump_mac_speed_duplex(hdev);
}

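/* Handler for "dump dcb <port_id> <pri_id> <pg_id> <rq_id> <nq_id>
 * <qset_id>": read and print the DFX status bitmaps and scheduling
 * counters of the selected qset, priority, PG and port.
 */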
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	enum hclge_opcode_type cmd;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];
	int cnt, ret;

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
	if (cnt != 6) {
		dev_err(&hdev->pdev->dev,
			"dump dcb: bad command parameter, cnt=%d\n", cnt);
		return;
	}

	cmd = HCLGE_OPC_QSET_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	cmd = HCLGE_OPC_PRI_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PG_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PORT_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	cmd = HCLGE_OPC_SCH_NQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_SCH_RQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
		 le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "tx_private_waterline: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
	if (ret)
		goto err_dcb_cmd_send;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[5]));
	return;

err_dcb_cmd_send:
	dev_err(&hdev->pdev->dev,
		"failed to dump dcb dfx, cmd = %#x, ret = %d\n",
		cmd, ret);
}

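/* Handler for the debugfs "dump reg" command: match cmd_buf against every
 * entry in hclge_dbg_reg_info (a keyword such as "ssu" or "rpu" matches
 * several entries and dumps them all), then against the "mac" and "dcb"
 * sub-commands, which have handlers of their own.
 */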
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
	const struct hclge_dbg_reg_type_info *reg_info;
	bool has_dump = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (!strncmp(cmd_buf, reg_info->reg_type,
			     strlen(reg_info->reg_type))) {
			hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
			has_dump = true;
		}
	}

	if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
		hclge_dbg_dump_mac(hdev);
		has_dump = true;
	}

	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
		has_dump = true;
	}

	if (!has_dump) {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return;
	}
}

static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
{
	if (flag)
		dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
			 index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
	else
		dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
}

static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i, ret;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tc\n");
		return;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
		return;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
		 hdev->tm_info.num_tc);
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
}

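/* Print the priority-group (PG) part of the traffic manager setup: PG and
 * port shaping parameters and the PG/PRI/QS scheduling modes. On
 * DCB-capable devices the back-pressure (BP) to qset mapping is dumped too.
 */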
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
		cmd, ret);
}

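/* Handler for "dump tm": read and print the scheduling hierarchy (the
 * PG-to-priority, qset-to-priority and nq-to-qset links, the per-level
 * DWRR weights and the priority shapers), then chain into the PG dump.
 */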
static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 le16_to_cpu(qs_to_pri_map->qs_id));
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
		 le16_to_cpu(nq_to_qs_map->nq_id));
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 le16_to_cpu(nq_to_qs_map->qset_id));

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
		 le16_to_cpu(qs_weight->qs_id));
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
		cmd, ret);
}

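/* Handler for "dump tm map <q_num>": resolve the queue to qset to priority
 * to TC mapping of one queue. On DCB-capable devices, also read the 32
 * qs_bit_map words of that TC (one per qs_group_id) and print them as the
 * full back-pressure to qset bitmap.
 */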
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
				  const char *cmd_buf)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	u32 queue_id;
	int group_id;
	u32 qset_mapping[32];
	int tc_id, qset_id;
	int pri_id, ret;
	u32 i;

	ret = kstrtouint(cmd_buf, 0, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04u | %04d | %02d | %02d\n",
		 queue_id, qset_id, pri_id, tc_id);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < 32; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			goto err_tm_map_cmd_send;

		qset_mapping[group_id] =
			le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
	}

	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < 4; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[(u32)(i + 7)],
			 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
			 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
			 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
			 qset_mapping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
		cmd, ret);
}

static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pause cfg fail, ret = %d\n", ret);
		return;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 le16_to_cpu(pause_param->pause_trans_time));
}

static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, ret = %d\n", ret);
		return;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}

static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_rx_com_wl *rx_com_wl;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc[2];
	int i, ret;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(rx_buf_cmd->buf_num[i]));

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 le16_to_cpu(rx_buf_cmd->shared_buf));

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_com_wl->com_wl.high),
		 le16_to_cpu(rx_com_wl->com_wl.low));

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_packet_cnt->com_wl.high),
		 le16_to_cpu(rx_packet_cnt->com_wl.low));
	dev_info(&hdev->pdev->dev, "\n");

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports rx priv wl\n");
		return;
	}
	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}

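/* Handler for "dump mng tbl": read the management table entry by entry,
 * skip entries whose resp_code is zero and print the rest as one
 * formatted row each, under a fixed header line.
 */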
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	u32 msg_egress_port;
	int ret, i;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf + strlen(printf_buf),
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);
			return;
		}

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
			 le16_to_cpu(req0->index),
			 req0->mac_addr[0], req0->mac_addr[1],
			 req0->mac_addr[2], req0->mac_addr[3],
			 req0->mac_addr[4], req0->mac_addr[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x |%04x |%x |%04x|%x |%02x |%02x |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 le16_to_cpu(req0->ethter_type),
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		msg_egress_port = le16_to_cpu(req0->egress_port);
		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x |%x |%02x |%04x|%x\n",
			 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
			 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 le16_to_cpu(req0->egress_queue),
			 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
	}
}

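/* Read one flow director TCAM entry (the x or y key half of rule @loc in
 * @stage) with a three-descriptor query and print its 13 tcam_data words.
 */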
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				  bool sel_x, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return ret;

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	return ret;
}

static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int cnt = 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		rule_locs[cnt] = rule->location;
		cnt++;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	if (cnt != hdev->hclge_fd_rule_num)
		return -EINVAL;

	return cnt;
}

static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
	int i, ret, rule_cnt;
	u16 *rule_locs;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");
		return;
	}

	if (!hdev->hclge_fd_rule_num ||
	    !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return;

	rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
			    sizeof(u16), GFP_KERNEL);
	if (!rule_locs)
		return;

	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
	if (rule_cnt <= 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get rule number, ret = %d\n", rule_cnt);
		kfree(rule_locs);
		return;
	}

	for (i = 0; i < rule_cnt; i++) {
		ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key x, ret = %d\n", ret);
			kfree(rule_locs);
			return;
		}

		ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key y, ret = %d\n", ret);
			kfree(rule_locs);
			return;
		}
	}

	kfree(rule_locs);
}

void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.reset_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.reset_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
	dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
	dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
		 hdev->last_serv_processed);
	dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
		 hdev->serv_processed_cnt);
}

static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
	dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
	dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
	dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
}

static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;
	int ret, i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret = %d\n",
			ret);
		return;
	}

	bd_num = le32_to_cpu(req->bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return;

	desc_tmp = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				 HCLGE_OPC_M7_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret = %d\n", ret);
		return;
	}

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));

		desc_tmp++;
	}

	kfree(desc_src);
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5

static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
					struct hclge_desc *desc, int *offset,
					int *length)
{
#define HCLGE_CMD_DATA_NUM	6

	int i;
	int j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
				 *offset,
				 le32_to_cpu(desc[i].data[j]));
			*offset += sizeof(u32);
			*length -= sizeof(u32);
			if (*length <= 0)
				return;
		}
	}
}

/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains offset and length
 */
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
				      const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
#define HCLGE_NCL_CONFIG_PARAM_NUM	2

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int offset;
	int length;
	int data0;
	int ret;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
		dev_err(&hdev->pdev->dev,
			"Too few parameters, num = %d.\n", ret);
		return;
	}

	if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev,
			"Invalid input, offset = %d, length = %d.\n",
			offset, length);
		return;
	}

	dev_info(&hdev->pdev->dev, "offset | data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return;

		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
	}
}

static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
				    const char *cmd_buf)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_serdes_lb_cmd *req_serdes;
	struct hclge_desc desc;
	u8 loopback_en;
	int ret;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);
		return;
	}

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	dev_info(&hdev->pdev->dev, "app loopback: %s\n",
		 loopback_en ? "on" : "off");

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to dump serdes loopback status, ret = %d\n",
			ret);
		return;
	}

	loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
		 loopback_en ? "on" : "off");

	loopback_en = req_serdes->enable &
		      HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
		 loopback_en ? "on" : "off");

	if (phydev)
		dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
			 phydev->loopback_enabled ? "on" : "off");
}

/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS 1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
	}
}

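/* Decode and print the shaping parameters of a single qset: the ir_b,
 * ir_u and ir_s rate fields and the bs_b and bs_s bucket fields of the
 * qs shapping register.
 */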
static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);

	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qsid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"qs%u failed to get tx_rate, ret=%d\n",
			qsid, ret);
		return;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	ir_b = hclge_tm_get_field(shapping_para, IR_B);
	ir_u = hclge_tm_get_field(shapping_para, IR_U);
	ir_s = hclge_tm_get_field(shapping_para, IR_S);
	bs_b = hclge_tm_get_field(shapping_para, BS_B);
	bs_s = hclge_tm_get_field(shapping_para, BS_S);

	dev_info(&hdev->pdev->dev,
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
}

static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
{
	struct hnae3_knic_private_info *kinfo;
	struct hclge_vport *vport;
	int vport_id, i;

	for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
		vport = &hdev->vport[vport_id];
		kinfo = &vport->nic.kinfo;

		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);

		for (i = 0; i < kinfo->num_tc; i++) {
			u16 qsid = vport->qs_offset + i;

			hclge_dbg_dump_qs_shaper_single(hdev, qsid);
		}
	}
}

static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
				     const char *cmd_buf)
{
#define HCLGE_MAX_QSET_NUM 1024

	u16 qsid;
	int ret;

	ret = kstrtou16(cmd_buf, 0, &qsid);
	if (ret) {
		hclge_dbg_dump_qs_shaper_all(hdev);
		return;
	}

	if (qsid >= HCLGE_MAX_QSET_NUM) {
		dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
			qsid);
		return;
	}

	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}

static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
				   bool is_unicast)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;
	u32 func_id;
	int ret;

	ret = kstrtouint(cmd_buf, 0, &func_id);
	if (ret < 0) {
		dev_err(&hdev->pdev->dev,
			"dump mac list: bad command string, ret = %d\n", ret);
		return -EINVAL;
	}

	if (func_id >= hdev->num_alloc_vport) {
		dev_err(&hdev->pdev->dev,
			"function id(%u) is out of range(0-%u)\n", func_id,
			hdev->num_alloc_vport - 1);
		return -EINVAL;
	}

	vport = &hdev->vport[func_id];

	list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;

	dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
		 func_id, is_unicast ? "uc" : "mc");
	dev_info(&hdev->pdev->dev, "mac address state\n");

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		dev_info(&hdev->pdev->dev, "%pM %d\n",
			 mac_node->mac_addr, mac_node->state);
	}

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

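/* Entry point for the debugfs command file. A few examples of accepted
 * input (the exact ids below are only illustrations):
 *   dump tc
 *   dump tm map 32
 *   dump reg ssu 0
 *   dump ncl_config 0x0 0x80
 *   dump qs shaper 5
 * Anything unrecognized logs "unknown command" and returns -EINVAL.
 */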
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG	"dump reg"
#define DUMP_TM_MAP	"dump tm map"
#define DUMP_LOOPBACK	"dump loopback"
#define DUMP_INTERRUPT	"dump intr"

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
		hclge_dbg_dump_serv_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else if (strncmp(cmd_buf, DUMP_LOOPBACK,
		   strlen(DUMP_LOOPBACK)) == 0) {
		hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
		hclge_dbg_dump_qs_shaper(hdev,
					 &cmd_buf[sizeof("dump qs shaper")]);
	} else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump uc mac list")],
					true);
	} else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump mc mac list")],
					false);
	} else if (strncmp(cmd_buf, DUMP_INTERRUPT,
		   strlen(DUMP_INTERRUPT)) == 0) {
		hclge_dbg_dump_interrupt(hdev);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
	}

	return 0;
}