1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56 #define HCLGE_LINK_STATUS_MS 10
57
58 #define HCLGE_VF_VPORT_START_NUM 1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
89 {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
114 HCLGE_FUN_RST_ING,
115 HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
142 HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 "App Loopback test",
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
154 "Phy Loopback test"
155 };
156
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
335 },
336 };
337
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
374 };
375
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
378 { IP_FRAGEMENT, 1},
379 { ROCE_TYPE, 1},
380 { NEXT_KEY, 5},
381 { VLAN_NUMBER, 2},
382 { SRC_VPORT, 12},
383 { DST_VPORT, 12},
384 { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48},
389 { OUTER_SRC_MAC, 48},
390 { OUTER_VLAN_TAG_FST, 16},
391 { OUTER_VLAN_TAG_SEC, 16},
392 { OUTER_ETH_TYPE, 16},
393 { OUTER_L2_RSV, 16},
394 { OUTER_IP_TOS, 8},
395 { OUTER_IP_PROTO, 8},
396 { OUTER_SRC_IP, 32},
397 { OUTER_DST_IP, 32},
398 { OUTER_L3_RSV, 16},
399 { OUTER_SRC_PORT, 16},
400 { OUTER_DST_PORT, 16},
401 { OUTER_L4_RSV, 32},
402 { OUTER_TUN_VNI, 24},
403 { OUTER_TUN_FLOW_ID, 8},
404 { INNER_DST_MAC, 48},
405 { INNER_SRC_MAC, 48},
406 { INNER_VLAN_TAG_FST, 16},
407 { INNER_VLAN_TAG_SEC, 16},
408 { INNER_ETH_TYPE, 16},
409 { INNER_L2_RSV, 16},
410 { INNER_IP_TOS, 8},
411 { INNER_IP_PROTO, 8},
412 { INNER_SRC_IP, 32},
413 { INNER_DST_IP, 32},
414 { INNER_L3_RSV, 16},
415 { INNER_SRC_PORT, 16},
416 { INNER_DST_PORT, 16},
417 { INNER_L4_RSV, 32},
418 };
419
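/* Read the MAC statistics with the legacy fixed-length command
 * (HCLGE_OPC_STATS_MAC, HCLGE_MAC_CMD_NUM descriptors) and accumulate
 * the returned counters into hdev->mac_stats.
 */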
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423
424 u64 *data = (u64 *)(&hdev->mac_stats);
425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 __le64 *desc_data;
427 int i, k, n;
428 int ret;
429
430 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 if (ret) {
433 dev_err(&hdev->pdev->dev,
434 "Get MAC pkt stats fail, status = %d.\n", ret);
435
436 return ret;
437 }
438
439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 /* for special opcode 0032, only the first desc has the head */
441 if (unlikely(i == 0)) {
442 desc_data = (__le64 *)(&desc[i].data[0]);
443 n = HCLGE_RD_FIRST_STATS_NUM;
444 } else {
445 desc_data = (__le64 *)(&desc[i]);
446 n = HCLGE_RD_OTHER_STATS_NUM;
447 }
448
449 for (k = 0; k < n; k++) {
450 *data += le64_to_cpu(*desc_data);
451 data++;
452 desc_data++;
453 }
454 }
455
456 return 0;
457 }
458
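/* Read the MAC statistics with the variable-length command
 * (HCLGE_OPC_STATS_MAC_ALL), using the descriptor count reported by
 * the firmware, and accumulate the counters into hdev->mac_stats.
 */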
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 u64 *data = (u64 *)(&hdev->mac_stats);
462 struct hclge_desc *desc;
463 __le64 *desc_data;
464 u16 i, k, n;
465 int ret;
466
467 /* This may be called inside atomic sections,
468 * so GFP_ATOMIC is more suitable here
469 */
470 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 if (!desc)
472 return -ENOMEM;
473
474 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 if (ret) {
477 kfree(desc);
478 return ret;
479 }
480
481 for (i = 0; i < desc_num; i++) {
482 /* for special opcode 0034, only the first desc has the head */
483 if (i == 0) {
484 desc_data = (__le64 *)(&desc[i].data[0]);
485 n = HCLGE_RD_FIRST_STATS_NUM;
486 } else {
487 desc_data = (__le64 *)(&desc[i]);
488 n = HCLGE_RD_OTHER_STATS_NUM;
489 }
490
491 for (k = 0; k < n; k++) {
492 *data += le64_to_cpu(*desc_data);
493 data++;
494 desc_data++;
495 }
496 }
497
498 kfree(desc);
499
500 return 0;
501 }
502
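/* Ask the firmware how many MAC statistics registers it exposes and
 * derive the descriptor count needed to read them all:
 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4).
 */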
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 struct hclge_desc desc;
506 __le32 *desc_data;
507 u32 reg_num;
508 int ret;
509
510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 if (ret)
513 return ret;
514
515 desc_data = (__le32 *)(&desc.data[0]);
516 reg_num = le32_to_cpu(*desc_data);
517
518 *desc_num = 1 + ((reg_num - 3) >> 2) +
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521 return 0;
522 }
523
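/* Update the MAC statistics, preferring the firmware-reported
 * descriptor count; fall back to the legacy fixed-length method when
 * the register-number query returns -EOPNOTSUPP.
 */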
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 u32 desc_num;
527 int ret;
528
529 ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
531 /* The firmware supports the new statistics acquisition method */
532 if (!ret)
533 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 else if (ret == -EOPNOTSUPP)
535 ret = hclge_mac_update_stats_defective(hdev);
536 else
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539 return ret;
540 }
541
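/* Query the per-queue RX and TX packet counters for every TQP of this
 * handle and accumulate them into the per-TQP software statistics.
 */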
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 struct hclge_vport *vport = hclge_get_vport(handle);
546 struct hclge_dev *hdev = vport->back;
547 struct hnae3_queue *queue;
548 struct hclge_desc desc[1];
549 struct hclge_tqp *tqp;
550 int ret, i;
551
552 for (i = 0; i < kinfo->num_tqps; i++) {
553 queue = handle->kinfo.tqp[i];
554 tqp = container_of(queue, struct hclge_tqp, q);
555 /* command : HCLGE_OPC_QUERY_IGU_STAT */
556 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 true);
558
559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 if (ret) {
562 dev_err(&hdev->pdev->dev,
563 "Query tqp stat fail, status = %d,queue = %d\n",
564 ret, i);
565 return ret;
566 }
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 le32_to_cpu(desc[0].data[1]);
569 }
570
571 for (i = 0; i < kinfo->num_tqps; i++) {
572 queue = handle->kinfo.tqp[i];
573 tqp = container_of(queue, struct hclge_tqp, q);
574 /* command : HCLGE_OPC_QUERY_IGU_STAT */
575 hclge_cmd_setup_basic_desc(&desc[0],
576 HCLGE_OPC_QUERY_TX_STATS,
577 true);
578
579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 if (ret) {
582 dev_err(&hdev->pdev->dev,
583 "Query tqp stat fail, status = %d,queue = %d\n",
584 ret, i);
585 return ret;
586 }
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 le32_to_cpu(desc[0].data[1]);
589 }
590
591 return 0;
592 }
593
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 struct hclge_tqp *tqp;
598 u64 *buff = data;
599 int i;
600
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 }
605
606 for (i = 0; i < kinfo->num_tqps; i++) {
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 }
610
611 return buff;
612 }
613
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
618 /* each tqp has both a TX and an RX queue */
619 return kinfo->num_tqps * (2);
620 }
621
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 u8 *buff = data;
626 int i;
627
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 struct hclge_tqp, q);
631 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 tqp->index);
633 buff = buff + ETH_GSTRING_LEN;
634 }
635
636 for (i = 0; i < kinfo->num_tqps; i++) {
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 struct hclge_tqp, q);
639 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 tqp->index);
641 buff = buff + ETH_GSTRING_LEN;
642 }
643
644 return buff;
645 }
646
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 const struct hclge_comm_stats_str strs[],
649 int size, u64 *data)
650 {
651 u64 *buf = data;
652 u32 i;
653
654 for (i = 0; i < size; i++)
655 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657 return buf + size;
658 }
659
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 const struct hclge_comm_stats_str strs[],
662 int size, u8 *data)
663 {
664 char *buff = (char *)data;
665 u32 i;
666
667 if (stringset != ETH_SS_STATS)
668 return buff;
669
670 for (i = 0; i < size; i++) {
671 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 buff = buff + ETH_GSTRING_LEN;
673 }
674
675 return (u8 *)buff;
676 }
677
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 struct hnae3_handle *handle;
681 int status;
682
683 handle = &hdev->vport[0].nic;
684 if (handle->client) {
685 status = hclge_tqps_update_stats(handle);
686 if (status) {
687 dev_err(&hdev->pdev->dev,
688 "Update TQPS stats fail, status = %d.\n",
689 status);
690 }
691 }
692
693 status = hclge_mac_update_stats(hdev);
694 if (status)
695 dev_err(&hdev->pdev->dev,
696 "Update MAC stats fail, status = %d.\n", status);
697 }
698
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 struct net_device_stats *net_stats)
701 {
702 struct hclge_vport *vport = hclge_get_vport(handle);
703 struct hclge_dev *hdev = vport->back;
704 int status;
705
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 return;
708
709 status = hclge_mac_update_stats(hdev);
710 if (status)
711 dev_err(&hdev->pdev->dev,
712 "Update MAC stats fail, status = %d.\n",
713 status);
714
715 status = hclge_tqps_update_stats(handle);
716 if (status)
717 dev_err(&hdev->pdev->dev,
718 "Update TQPS stats fail, status = %d.\n",
719 status);
720
721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 HNAE3_SUPPORT_PHY_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730
731 struct hclge_vport *vport = hclge_get_vport(handle);
732 struct hclge_dev *hdev = vport->back;
733 int count = 0;
734
735 /* Loopback test support rules:
736 * mac: supported only in GE mode
737 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
738 * phy: supported only when a PHY device exists on the board
739 */
740 if (stringset == ETH_SS_TEST) {
741 /* clear loopback bit flags at first */
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 count += 1;
748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 }
750
751 count += 2;
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
755 if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
756 hdev->hw.mac.phydev->drv->set_loopback) {
757 count += 1;
758 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759 }
760
761 } else if (stringset == ETH_SS_STATS) {
762 count = ARRAY_SIZE(g_mac_stats_string) +
763 hclge_tqps_get_sset_count(handle, stringset);
764 }
765
766 return count;
767 }
768
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770 u8 *data)
771 {
772 u8 *p = (char *)data;
773 int size;
774
775 if (stringset == ETH_SS_STATS) {
776 size = ARRAY_SIZE(g_mac_stats_string);
777 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778 size, p);
779 p = hclge_tqps_get_strings(handle, p);
780 } else if (stringset == ETH_SS_TEST) {
781 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783 ETH_GSTRING_LEN);
784 p += ETH_GSTRING_LEN;
785 }
786 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788 ETH_GSTRING_LEN);
789 p += ETH_GSTRING_LEN;
790 }
791 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792 memcpy(p,
793 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794 ETH_GSTRING_LEN);
795 p += ETH_GSTRING_LEN;
796 }
797 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799 ETH_GSTRING_LEN);
800 p += ETH_GSTRING_LEN;
801 }
802 }
803 }
804
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807 struct hclge_vport *vport = hclge_get_vport(handle);
808 struct hclge_dev *hdev = vport->back;
809 u64 *p;
810
811 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812 ARRAY_SIZE(g_mac_stats_string), data);
813 p = hclge_tqps_get_stats(handle, p);
814 }
815
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817 struct hns3_mac_stats *mac_stats)
818 {
819 struct hclge_vport *vport = hclge_get_vport(handle);
820 struct hclge_dev *hdev = vport->back;
821
822 hclge_update_stats(handle, NULL);
823
824 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829 struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK 0xF
832
833 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834 return -EINVAL;
835
836 /* Set the pf to main pf */
837 if (status->pf_state & HCLGE_PF_STATE_MAIN)
838 hdev->flag |= HCLGE_FLAG_MAIN;
839 else
840 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843 return 0;
844 }
845
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT 5
849
850 struct hclge_func_status_cmd *req;
851 struct hclge_desc desc;
852 int timeout = 0;
853 int ret;
854
855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856 req = (struct hclge_func_status_cmd *)desc.data;
857
858 do {
859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 if (ret) {
861 dev_err(&hdev->pdev->dev,
862 "query function status failed %d.\n", ret);
863 return ret;
864 }
865
866 /* Check pf reset is done */
867 if (req->pf_state)
868 break;
869 usleep_range(1000, 2000);
870 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871
872 return hclge_parse_func_status(hdev, req);
873 }
874
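/* Query the TQP, buffer and MSI-X resources assigned to this PF; when
 * RoCE is supported, the NIC vectors precede the RoCE vectors in the
 * MSI-X space.
 */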
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877 struct hclge_pf_res_cmd *req;
878 struct hclge_desc desc;
879 int ret;
880
881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883 if (ret) {
884 dev_err(&hdev->pdev->dev,
885 "query pf resource failed %d.\n", ret);
886 return ret;
887 }
888
889 req = (struct hclge_pf_res_cmd *)desc.data;
890 hdev->num_tqps = le16_to_cpu(req->tqp_num);
891 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892
893 if (req->tx_buf_size)
894 hdev->tx_buf_size =
895 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 else
897 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
899 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
901 if (req->dv_buf_size)
902 hdev->dv_buf_size =
903 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 else
905 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
907 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908
909 if (hnae3_dev_roce_supported(hdev)) {
910 hdev->roce_base_msix_offset =
911 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
912 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
913 hdev->num_roce_msi =
914 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
915 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
916
917 /* nic's msix number is always equal to the roce's. */
918 hdev->num_nic_msi = hdev->num_roce_msi;
919
920 /* PF should have NIC vectors and Roce vectors,
921 * NIC vectors are queued before Roce vectors.
922 */
923 hdev->num_msi = hdev->num_roce_msi +
924 hdev->roce_base_msix_offset;
925 } else {
926 hdev->num_msi =
927 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
928 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
929
930 hdev->num_nic_msi = hdev->num_msi;
931 }
932
933 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
934 dev_err(&hdev->pdev->dev,
935 "Just %u msi resources, not enough for pf(min:2).\n",
936 hdev->num_nic_msi);
937 return -EINVAL;
938 }
939
940 return 0;
941 }
942
943 static int hclge_parse_speed(int speed_cmd, int *speed)
944 {
945 switch (speed_cmd) {
946 case 6:
947 *speed = HCLGE_MAC_SPEED_10M;
948 break;
949 case 7:
950 *speed = HCLGE_MAC_SPEED_100M;
951 break;
952 case 0:
953 *speed = HCLGE_MAC_SPEED_1G;
954 break;
955 case 1:
956 *speed = HCLGE_MAC_SPEED_10G;
957 break;
958 case 2:
959 *speed = HCLGE_MAC_SPEED_25G;
960 break;
961 case 3:
962 *speed = HCLGE_MAC_SPEED_40G;
963 break;
964 case 4:
965 *speed = HCLGE_MAC_SPEED_50G;
966 break;
967 case 5:
968 *speed = HCLGE_MAC_SPEED_100G;
969 break;
970 case 8:
971 *speed = HCLGE_MAC_SPEED_200G;
972 break;
973 default:
974 return -EINVAL;
975 }
976
977 return 0;
978 }
979
980 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981 {
982 struct hclge_vport *vport = hclge_get_vport(handle);
983 struct hclge_dev *hdev = vport->back;
984 u32 speed_ability = hdev->hw.mac.speed_ability;
985 u32 speed_bit = 0;
986
987 switch (speed) {
988 case HCLGE_MAC_SPEED_10M:
989 speed_bit = HCLGE_SUPPORT_10M_BIT;
990 break;
991 case HCLGE_MAC_SPEED_100M:
992 speed_bit = HCLGE_SUPPORT_100M_BIT;
993 break;
994 case HCLGE_MAC_SPEED_1G:
995 speed_bit = HCLGE_SUPPORT_1G_BIT;
996 break;
997 case HCLGE_MAC_SPEED_10G:
998 speed_bit = HCLGE_SUPPORT_10G_BIT;
999 break;
1000 case HCLGE_MAC_SPEED_25G:
1001 speed_bit = HCLGE_SUPPORT_25G_BIT;
1002 break;
1003 case HCLGE_MAC_SPEED_40G:
1004 speed_bit = HCLGE_SUPPORT_40G_BIT;
1005 break;
1006 case HCLGE_MAC_SPEED_50G:
1007 speed_bit = HCLGE_SUPPORT_50G_BIT;
1008 break;
1009 case HCLGE_MAC_SPEED_100G:
1010 speed_bit = HCLGE_SUPPORT_100G_BIT;
1011 break;
1012 case HCLGE_MAC_SPEED_200G:
1013 speed_bit = HCLGE_SUPPORT_200G_BIT;
1014 break;
1015 default:
1016 return -EINVAL;
1017 }
1018
1019 if (speed_bit & speed_ability)
1020 return 0;
1021
1022 return -EINVAL;
1023 }
1024
1025 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1026 {
1027 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1028 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1029 mac->supported);
1030 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1031 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032 mac->supported);
1033 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035 mac->supported);
1036 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038 mac->supported);
1039 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041 mac->supported);
1042 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1044 mac->supported);
1045 }
1046
1047 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1048 {
1049 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051 mac->supported);
1052 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1053 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1054 mac->supported);
1055 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057 mac->supported);
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060 mac->supported);
1061 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063 mac->supported);
1064 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065 linkmode_set_bit(
1066 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1067 mac->supported);
1068 }
1069
1070 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1071 {
1072 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1073 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074 mac->supported);
1075 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1076 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077 mac->supported);
1078 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1079 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080 mac->supported);
1081 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1082 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083 mac->supported);
1084 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1085 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086 mac->supported);
1087 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1089 mac->supported);
1090 }
1091
1092 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1093 {
1094 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1095 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096 mac->supported);
1097 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1098 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099 mac->supported);
1100 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1101 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102 mac->supported);
1103 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1104 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105 mac->supported);
1106 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1107 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108 mac->supported);
1109 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111 mac->supported);
1112 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1114 mac->supported);
1115 }
1116
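/* Advertise the FEC ability for the current MAC speed: BASE-R for
 * 10G/40G, BASE-R/RS for 25G/50G, RS only for 100G/200G, and no FEC
 * for other speeds (AUTO is reported whenever FEC is available).
 */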
1117 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118 {
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1120 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121
1122 switch (mac->speed) {
1123 case HCLGE_MAC_SPEED_10G:
1124 case HCLGE_MAC_SPEED_40G:
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1126 mac->supported);
1127 mac->fec_ability =
1128 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129 break;
1130 case HCLGE_MAC_SPEED_25G:
1131 case HCLGE_MAC_SPEED_50G:
1132 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1133 mac->supported);
1134 mac->fec_ability =
1135 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1136 BIT(HNAE3_FEC_AUTO);
1137 break;
1138 case HCLGE_MAC_SPEED_100G:
1139 case HCLGE_MAC_SPEED_200G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1141 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1142 break;
1143 default:
1144 mac->fec_ability = 0;
1145 break;
1146 }
1147 }
1148
1149 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1150 u16 speed_ability)
1151 {
1152 struct hclge_mac *mac = &hdev->hw.mac;
1153
1154 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1156 mac->supported);
1157
1158 hclge_convert_setting_sr(mac, speed_ability);
1159 hclge_convert_setting_lr(mac, speed_ability);
1160 hclge_convert_setting_cr(mac, speed_ability);
1161 if (hnae3_dev_fec_supported(hdev))
1162 hclge_convert_setting_fec(mac);
1163
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1166 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1167 }
1168
1169 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1170 u16 speed_ability)
1171 {
1172 struct hclge_mac *mac = &hdev->hw.mac;
1173
1174 hclge_convert_setting_kr(mac, speed_ability);
1175 if (hnae3_dev_fec_supported(hdev))
1176 hclge_convert_setting_fec(mac);
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1179 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1180 }
1181
1182 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1183 u16 speed_ability)
1184 {
1185 unsigned long *supported = hdev->hw.mac.supported;
1186
1187 /* default to supporting all speeds for a GE port */
1188 if (!speed_ability)
1189 speed_ability = HCLGE_SUPPORT_GE;
1190
1191 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1192 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1193 supported);
1194
1195 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197 supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1199 supported);
1200 }
1201
1202 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1204 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1205 }
1206
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1210 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1211 }
1212
1213 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1214 {
1215 u8 media_type = hdev->hw.mac.media_type;
1216
1217 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1218 hclge_parse_fiber_link_mode(hdev, speed_ability);
1219 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1220 hclge_parse_copper_link_mode(hdev, speed_ability);
1221 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1222 hclge_parse_backplane_link_mode(hdev, speed_ability);
1223 }
1224
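/* Return the highest speed set in the speed_ability bitmap, falling
 * back to 1G when no known bit is set.
 */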
1225 static u32 hclge_get_max_speed(u16 speed_ability)
1226 {
1227 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1228 return HCLGE_MAC_SPEED_200G;
1229
1230 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1231 return HCLGE_MAC_SPEED_100G;
1232
1233 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1234 return HCLGE_MAC_SPEED_50G;
1235
1236 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1237 return HCLGE_MAC_SPEED_40G;
1238
1239 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1240 return HCLGE_MAC_SPEED_25G;
1241
1242 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1243 return HCLGE_MAC_SPEED_10G;
1244
1245 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1246 return HCLGE_MAC_SPEED_1G;
1247
1248 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1249 return HCLGE_MAC_SPEED_100M;
1250
1251 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1252 return HCLGE_MAC_SPEED_10M;
1253
1254 return HCLGE_MAC_SPEED_1G;
1255 }
1256
1257 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1258 {
1259 #define SPEED_ABILITY_EXT_SHIFT 8
1260
1261 struct hclge_cfg_param_cmd *req;
1262 u64 mac_addr_tmp_high;
1263 u16 speed_ability_ext;
1264 u64 mac_addr_tmp;
1265 unsigned int i;
1266
1267 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1268
1269 /* get the configuration */
1270 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1271 HCLGE_CFG_VMDQ_M,
1272 HCLGE_CFG_VMDQ_S);
1273 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1275 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1276 HCLGE_CFG_TQP_DESC_N_M,
1277 HCLGE_CFG_TQP_DESC_N_S);
1278
1279 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 HCLGE_CFG_PHY_ADDR_M,
1281 HCLGE_CFG_PHY_ADDR_S);
1282 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 HCLGE_CFG_MEDIA_TP_M,
1284 HCLGE_CFG_MEDIA_TP_S);
1285 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1286 HCLGE_CFG_RX_BUF_LEN_M,
1287 HCLGE_CFG_RX_BUF_LEN_S);
1288 /* get mac_address */
1289 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1290 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1291 HCLGE_CFG_MAC_ADDR_H_M,
1292 HCLGE_CFG_MAC_ADDR_H_S);
1293
1294 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1295
1296 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1297 HCLGE_CFG_DEFAULT_SPEED_M,
1298 HCLGE_CFG_DEFAULT_SPEED_S);
1299 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1300 HCLGE_CFG_RSS_SIZE_M,
1301 HCLGE_CFG_RSS_SIZE_S);
1302
1303 for (i = 0; i < ETH_ALEN; i++)
1304 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1305
1306 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1307 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1308
1309 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1310 HCLGE_CFG_SPEED_ABILITY_M,
1311 HCLGE_CFG_SPEED_ABILITY_S);
1312 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1314 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1315 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316
1317 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1318 HCLGE_CFG_UMV_TBL_SPACE_M,
1319 HCLGE_CFG_UMV_TBL_SPACE_S);
1320 if (!cfg->umv_space)
1321 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1322 }
1323
1324 /* hclge_get_cfg: query the static parameters from flash
1325 * @hdev: pointer to struct hclge_dev
1326 * @hcfg: the config structure to be filled
1327 */
1328 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1329 {
1330 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1331 struct hclge_cfg_param_cmd *req;
1332 unsigned int i;
1333 int ret;
1334
1335 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1336 u32 offset = 0;
1337
1338 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1339 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1340 true);
1341 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1342 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1343 /* Len should be in units of 4 bytes when sent to hardware */
1344 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1345 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1346 req->offset = cpu_to_le32(offset);
1347 }
1348
1349 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1350 if (ret) {
1351 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1352 return ret;
1353 }
1354
1355 hclge_parse_cfg(hcfg, desc);
1356
1357 return 0;
1358 }
1359
1360 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361 {
1362 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1363
1364 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365
1366 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1367 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1368 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1369 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1370 }
1371
1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373 struct hclge_desc *desc)
1374 {
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376 struct hclge_dev_specs_0_cmd *req0;
1377
1378 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1379
1380 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1381 ae_dev->dev_specs.rss_ind_tbl_size =
1382 le16_to_cpu(req0->rss_ind_tbl_size);
1383 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1384 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1385 }
1386
1387 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1388 {
1389 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1390
1391 if (!dev_specs->max_non_tso_bd_num)
1392 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1393 if (!dev_specs->rss_ind_tbl_size)
1394 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1395 if (!dev_specs->rss_key_size)
1396 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1397 if (!dev_specs->max_tm_rate)
1398 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1399 }
1400
1401 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1402 {
1403 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1404 int ret;
1405 int i;
1406
1407 /* set default specifications as devices lower than version V3 do not
1408 * support querying specifications from firmware.
1409 */
1410 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1411 hclge_set_default_dev_specs(hdev);
1412 return 0;
1413 }
1414
1415 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1416 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1417 true);
1418 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1419 }
1420 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1421
1422 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1423 if (ret)
1424 return ret;
1425
1426 hclge_parse_dev_specs(hdev, desc);
1427 hclge_check_dev_specs(hdev);
1428
1429 return 0;
1430 }
1431
1432 static int hclge_get_cap(struct hclge_dev *hdev)
1433 {
1434 int ret;
1435
1436 ret = hclge_query_function_status(hdev);
1437 if (ret) {
1438 dev_err(&hdev->pdev->dev,
1439 "query function status error %d.\n", ret);
1440 return ret;
1441 }
1442
1443 /* get pf resource */
1444 return hclge_query_pf_resource(hdev);
1445 }
1446
1447 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1448 {
1449 #define HCLGE_MIN_TX_DESC 64
1450 #define HCLGE_MIN_RX_DESC 64
1451
1452 if (!is_kdump_kernel())
1453 return;
1454
1455 dev_info(&hdev->pdev->dev,
1456 "Running kdump kernel. Using minimal resources\n");
1457
1458 /* minimal number of queue pairs equals the number of vports */
1459 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1460 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1461 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1462 }
1463
1464 static int hclge_configure(struct hclge_dev *hdev)
1465 {
1466 const struct cpumask *cpumask = cpu_online_mask;
1467 struct hclge_cfg cfg;
1468 unsigned int i;
1469 int node, ret;
1470
1471 ret = hclge_get_cfg(hdev, &cfg);
1472 if (ret)
1473 return ret;
1474
1475 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1476 hdev->base_tqp_pid = 0;
1477 hdev->rss_size_max = cfg.rss_size_max;
1478 hdev->rx_buf_len = cfg.rx_buf_len;
1479 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1480 hdev->hw.mac.media_type = cfg.media_type;
1481 hdev->hw.mac.phy_addr = cfg.phy_addr;
1482 hdev->num_tx_desc = cfg.tqp_desc_num;
1483 hdev->num_rx_desc = cfg.tqp_desc_num;
1484 hdev->tm_info.num_pg = 1;
1485 hdev->tc_max = cfg.tc_num;
1486 hdev->tm_info.hw_pfc_map = 0;
1487 hdev->wanted_umv_size = cfg.umv_space;
1488
1489 if (hnae3_dev_fd_supported(hdev)) {
1490 hdev->fd_en = true;
1491 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1492 }
1493
1494 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1495 if (ret) {
1496 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1497 cfg.default_speed, ret);
1498 return ret;
1499 }
1500
1501 hclge_parse_link_mode(hdev, cfg.speed_ability);
1502
1503 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1504
1505 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1506 (hdev->tc_max < 1)) {
1507 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1508 hdev->tc_max);
1509 hdev->tc_max = 1;
1510 }
1511
1512 /* Dev does not support DCB */
1513 if (!hnae3_dev_dcb_supported(hdev)) {
1514 hdev->tc_max = 1;
1515 hdev->pfc_max = 0;
1516 } else {
1517 hdev->pfc_max = hdev->tc_max;
1518 }
1519
1520 hdev->tm_info.num_tc = 1;
1521
1522 /* Currently does not support non-contiguous tc */
1523 for (i = 0; i < hdev->tm_info.num_tc; i++)
1524 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1525
1526 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1527
1528 hclge_init_kdump_kernel_config(hdev);
1529
1530 /* Set the affinity based on numa node */
1531 node = dev_to_node(&hdev->pdev->dev);
1532 if (node != NUMA_NO_NODE)
1533 cpumask = cpumask_of_node(node);
1534
1535 cpumask_copy(&hdev->affinity_mask, cpumask);
1536
1537 return ret;
1538 }
1539
1540 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1541 u16 tso_mss_max)
1542 {
1543 struct hclge_cfg_tso_status_cmd *req;
1544 struct hclge_desc desc;
1545
1546 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1547
1548 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1549 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1550 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1551
1552 return hclge_cmd_send(&hdev->hw, &desc, 1);
1553 }
1554
1555 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1556 {
1557 struct hclge_cfg_gro_status_cmd *req;
1558 struct hclge_desc desc;
1559 int ret;
1560
1561 if (!hnae3_dev_gro_supported(hdev))
1562 return 0;
1563
1564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1565 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1566
1567 req->gro_en = en ? 1 : 0;
1568
1569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1570 if (ret)
1571 dev_err(&hdev->pdev->dev,
1572 "GRO hardware config cmd failed, ret = %d\n", ret);
1573
1574 return ret;
1575 }
1576
1577 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1578 {
1579 struct hclge_tqp *tqp;
1580 int i;
1581
1582 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1583 sizeof(struct hclge_tqp), GFP_KERNEL);
1584 if (!hdev->htqp)
1585 return -ENOMEM;
1586
1587 tqp = hdev->htqp;
1588
1589 for (i = 0; i < hdev->num_tqps; i++) {
1590 tqp->dev = &hdev->pdev->dev;
1591 tqp->index = i;
1592
1593 tqp->q.ae_algo = &ae_algo;
1594 tqp->q.buf_size = hdev->rx_buf_len;
1595 tqp->q.tx_desc_num = hdev->num_tx_desc;
1596 tqp->q.rx_desc_num = hdev->num_rx_desc;
1597 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1598 i * HCLGE_TQP_REG_SIZE;
1599
1600 tqp++;
1601 }
1602
1603 return 0;
1604 }
1605
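/* Bind physical queue pair tqp_pid to function func_id as its virtual
 * queue tqp_vid; the map-type bit marks VF (rather than PF) mappings.
 */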
1606 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1607 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1608 {
1609 struct hclge_tqp_map_cmd *req;
1610 struct hclge_desc desc;
1611 int ret;
1612
1613 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1614
1615 req = (struct hclge_tqp_map_cmd *)desc.data;
1616 req->tqp_id = cpu_to_le16(tqp_pid);
1617 req->tqp_vf = func_id;
1618 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1619 if (!is_pf)
1620 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1621 req->tqp_vid = cpu_to_le16(tqp_vid);
1622
1623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1624 if (ret)
1625 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1626
1627 return ret;
1628 }
1629
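/* Hand out up to num_tqps unallocated physical TQPs to this vport and
 * derive its rss_size from the allocation, capped so that every queue
 * can have its own NIC MSI-X vector.
 */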
1630 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1631 {
1632 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1633 struct hclge_dev *hdev = vport->back;
1634 int i, alloced;
1635
1636 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1637 alloced < num_tqps; i++) {
1638 if (!hdev->htqp[i].alloced) {
1639 hdev->htqp[i].q.handle = &vport->nic;
1640 hdev->htqp[i].q.tqp_index = alloced;
1641 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1642 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1643 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1644 hdev->htqp[i].alloced = true;
1645 alloced++;
1646 }
1647 }
1648 vport->alloc_tqps = alloced;
1649 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1650 vport->alloc_tqps / hdev->tm_info.num_tc);
1651
1652 /* ensure a one-to-one mapping between irq and queue by default */
1653 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1654 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1655
1656 return 0;
1657 }
1658
1659 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1660 u16 num_tx_desc, u16 num_rx_desc)
1661
1662 {
1663 struct hnae3_handle *nic = &vport->nic;
1664 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1665 struct hclge_dev *hdev = vport->back;
1666 int ret;
1667
1668 kinfo->num_tx_desc = num_tx_desc;
1669 kinfo->num_rx_desc = num_rx_desc;
1670
1671 kinfo->rx_buf_len = hdev->rx_buf_len;
1672
1673 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1674 sizeof(struct hnae3_queue *), GFP_KERNEL);
1675 if (!kinfo->tqp)
1676 return -ENOMEM;
1677
1678 ret = hclge_assign_tqp(vport, num_tqps);
1679 if (ret)
1680 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1681
1682 return ret;
1683 }
1684
1685 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1686 struct hclge_vport *vport)
1687 {
1688 struct hnae3_handle *nic = &vport->nic;
1689 struct hnae3_knic_private_info *kinfo;
1690 u16 i;
1691
1692 kinfo = &nic->kinfo;
1693 for (i = 0; i < vport->alloc_tqps; i++) {
1694 struct hclge_tqp *q =
1695 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1696 bool is_pf;
1697 int ret;
1698
1699 is_pf = !(vport->vport_id);
1700 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1701 i, is_pf);
1702 if (ret)
1703 return ret;
1704 }
1705
1706 return 0;
1707 }
1708
1709 static int hclge_map_tqp(struct hclge_dev *hdev)
1710 {
1711 struct hclge_vport *vport = hdev->vport;
1712 u16 i, num_vport;
1713
1714 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1715 for (i = 0; i < num_vport; i++) {
1716 int ret;
1717
1718 ret = hclge_map_tqp_to_vport(hdev, vport);
1719 if (ret)
1720 return ret;
1721
1722 vport++;
1723 }
1724
1725 return 0;
1726 }
1727
1728 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1729 {
1730 struct hnae3_handle *nic = &vport->nic;
1731 struct hclge_dev *hdev = vport->back;
1732 int ret;
1733
1734 nic->pdev = hdev->pdev;
1735 nic->ae_algo = &ae_algo;
1736 nic->numa_node_mask = hdev->numa_node_mask;
1737
1738 ret = hclge_knic_setup(vport, num_tqps,
1739 hdev->num_tx_desc, hdev->num_rx_desc);
1740 if (ret)
1741 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1742
1743 return ret;
1744 }
1745
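/* Note: one vport is allocated for the PF main NIC plus one per VMDq vport
 * and one per requested VF. TQPs are split evenly and the main vport also
 * receives the remainder: for example, with 17 TQPs and 4 vports each vport
 * gets 4 TQPs and the main vport gets 4 + 1 = 5.
 */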
1746 static int hclge_alloc_vport(struct hclge_dev *hdev)
1747 {
1748 struct pci_dev *pdev = hdev->pdev;
1749 struct hclge_vport *vport;
1750 u32 tqp_main_vport;
1751 u32 tqp_per_vport;
1752 int num_vport, i;
1753 int ret;
1754
1755 	/* We need to alloc a vport for the main NIC of the PF */
1756 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1757
1758 if (hdev->num_tqps < num_vport) {
1759 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1760 hdev->num_tqps, num_vport);
1761 return -EINVAL;
1762 }
1763
1764 /* Alloc the same number of TQPs for every vport */
1765 tqp_per_vport = hdev->num_tqps / num_vport;
1766 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1767
1768 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1769 GFP_KERNEL);
1770 if (!vport)
1771 return -ENOMEM;
1772
1773 hdev->vport = vport;
1774 hdev->num_alloc_vport = num_vport;
1775
1776 if (IS_ENABLED(CONFIG_PCI_IOV))
1777 hdev->num_alloc_vfs = hdev->num_req_vfs;
1778
1779 for (i = 0; i < num_vport; i++) {
1780 vport->back = hdev;
1781 vport->vport_id = i;
1782 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1783 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1784 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1785 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1786 INIT_LIST_HEAD(&vport->vlan_list);
1787 INIT_LIST_HEAD(&vport->uc_mac_list);
1788 INIT_LIST_HEAD(&vport->mc_mac_list);
1789 spin_lock_init(&vport->mac_list_lock);
1790
1791 if (i == 0)
1792 ret = hclge_vport_setup(vport, tqp_main_vport);
1793 else
1794 ret = hclge_vport_setup(vport, tqp_per_vport);
1795 if (ret) {
1796 dev_err(&pdev->dev,
1797 "vport setup failed for vport %d, %d\n",
1798 i, ret);
1799 return ret;
1800 }
1801
1802 vport++;
1803 }
1804
1805 return 0;
1806 }
1807
1808 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1809 struct hclge_pkt_buf_alloc *buf_alloc)
1810 {
1811 /* TX buffer size is in units of 128 bytes */
1812 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1813 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1814 struct hclge_tx_buff_alloc_cmd *req;
1815 struct hclge_desc desc;
1816 int ret;
1817 u8 i;
1818
1819 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1820
1821 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1822 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1823 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1824
1825 req->tx_pkt_buff[i] =
1826 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1827 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1828 }
1829
1830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1831 if (ret)
1832 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1833 ret);
1834
1835 return ret;
1836 }
1837
1838 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1839 struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1842
1843 if (ret)
1844 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1845
1846 return ret;
1847 }
1848
1849 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1850 {
1851 unsigned int i;
1852 u32 cnt = 0;
1853
1854 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1855 if (hdev->hw_tc_map & BIT(i))
1856 cnt++;
1857 return cnt;
1858 }
1859
1860 /* Get the number of pfc enabled TCs, which have private buffer */
1861 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1862 struct hclge_pkt_buf_alloc *buf_alloc)
1863 {
1864 struct hclge_priv_buf *priv;
1865 unsigned int i;
1866 int cnt = 0;
1867
1868 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1869 priv = &buf_alloc->priv_buf[i];
1870 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1871 priv->enable)
1872 cnt++;
1873 }
1874
1875 return cnt;
1876 }
1877
1878 /* Get the number of pfc disabled TCs, which have private buffer */
1879 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1880 struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882 struct hclge_priv_buf *priv;
1883 unsigned int i;
1884 int cnt = 0;
1885
1886 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1887 priv = &buf_alloc->priv_buf[i];
1888 if (hdev->hw_tc_map & BIT(i) &&
1889 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1890 priv->enable)
1891 cnt++;
1892 }
1893
1894 return cnt;
1895 }
1896
1897 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1898 {
1899 struct hclge_priv_buf *priv;
1900 u32 rx_priv = 0;
1901 int i;
1902
1903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1904 priv = &buf_alloc->priv_buf[i];
1905 if (priv->enable)
1906 rx_priv += priv->buf_size;
1907 }
1908 return rx_priv;
1909 }
1910
1911 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913 u32 i, total_tx_size = 0;
1914
1915 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1916 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1917
1918 return total_tx_size;
1919 }
1920
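/* Note: hclge_is_rx_buf_ok() checks whether the rx buffer left after the
 * private buffers (rx_all - rx_priv) can hold the required shared buffer,
 * i.e. roundup(max(shared_buf_min, shared_buf_tc), HCLGE_BUF_SIZE_UNIT).
 * If it can, the shared buffer size and its per-TC high/low thresholds are
 * filled in; otherwise the caller has to shrink the private buffers.
 */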
1921 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1922 struct hclge_pkt_buf_alloc *buf_alloc,
1923 u32 rx_all)
1924 {
1925 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1926 u32 tc_num = hclge_get_tc_num(hdev);
1927 u32 shared_buf, aligned_mps;
1928 u32 rx_priv;
1929 int i;
1930
1931 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1932
1933 if (hnae3_dev_dcb_supported(hdev))
1934 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1935 hdev->dv_buf_size;
1936 else
1937 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1938 + hdev->dv_buf_size;
1939
1940 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1941 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1942 HCLGE_BUF_SIZE_UNIT);
1943
1944 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1945 if (rx_all < rx_priv + shared_std)
1946 return false;
1947
1948 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1949 buf_alloc->s_buf.buf_size = shared_buf;
1950 if (hnae3_dev_dcb_supported(hdev)) {
1951 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1952 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1953 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1954 HCLGE_BUF_SIZE_UNIT);
1955 } else {
1956 buf_alloc->s_buf.self.high = aligned_mps +
1957 HCLGE_NON_DCB_ADDITIONAL_BUF;
1958 buf_alloc->s_buf.self.low = aligned_mps;
1959 }
1960
1961 if (hnae3_dev_dcb_supported(hdev)) {
1962 hi_thrd = shared_buf - hdev->dv_buf_size;
1963
1964 if (tc_num <= NEED_RESERVE_TC_NUM)
1965 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1966 / BUF_MAX_PERCENT;
1967
1968 if (tc_num)
1969 hi_thrd = hi_thrd / tc_num;
1970
1971 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1972 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1973 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1974 } else {
1975 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1976 lo_thrd = aligned_mps;
1977 }
1978
1979 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1980 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1981 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1982 }
1983
1984 return true;
1985 }
1986
1987 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1988 struct hclge_pkt_buf_alloc *buf_alloc)
1989 {
1990 u32 i, total_size;
1991
1992 total_size = hdev->pkt_buf_size;
1993
1994 /* alloc tx buffer for all enabled tc */
1995 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1996 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1997
1998 if (hdev->hw_tc_map & BIT(i)) {
1999 if (total_size < hdev->tx_buf_size)
2000 return -ENOMEM;
2001
2002 priv->tx_buf_size = hdev->tx_buf_size;
2003 } else {
2004 priv->tx_buf_size = 0;
2005 }
2006
2007 total_size -= priv->tx_buf_size;
2008 }
2009
2010 return 0;
2011 }
2012
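/* Note: hclge_rx_buf_calc_all() sizes a private rx buffer for every enabled
 * TC. PFC-enabled TCs get a non-zero low waterline (aligned_mps when @max is
 * true), the other TCs only a high waterline, and each buffer is
 * wl.high + dv_buf_size. The result is then validated with
 * hclge_is_rx_buf_ok().
 */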
2013 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2014 struct hclge_pkt_buf_alloc *buf_alloc)
2015 {
2016 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2018 unsigned int i;
2019
2020 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2021 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2022
2023 priv->enable = 0;
2024 priv->wl.low = 0;
2025 priv->wl.high = 0;
2026 priv->buf_size = 0;
2027
2028 if (!(hdev->hw_tc_map & BIT(i)))
2029 continue;
2030
2031 priv->enable = 1;
2032
2033 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2034 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2035 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2036 HCLGE_BUF_SIZE_UNIT);
2037 } else {
2038 priv->wl.low = 0;
2039 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2040 aligned_mps;
2041 }
2042
2043 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2044 }
2045
2046 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2047 }
2048
2049 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2050 struct hclge_pkt_buf_alloc *buf_alloc)
2051 {
2052 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2053 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2054 int i;
2055
2056 	/* clear the private buffers starting from the last TC */
2057 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2058 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2059 unsigned int mask = BIT((unsigned int)i);
2060
2061 if (hdev->hw_tc_map & mask &&
2062 !(hdev->tm_info.hw_pfc_map & mask)) {
2063 /* Clear the no pfc TC private buffer */
2064 priv->wl.low = 0;
2065 priv->wl.high = 0;
2066 priv->buf_size = 0;
2067 priv->enable = 0;
2068 no_pfc_priv_num--;
2069 }
2070
2071 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2072 no_pfc_priv_num == 0)
2073 break;
2074 }
2075
2076 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078
2079 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2084 int i;
2085
2086 	/* clear the private buffers starting from the last TC */
2087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 unsigned int mask = BIT((unsigned int)i);
2090
2091 if (hdev->hw_tc_map & mask &&
2092 hdev->tm_info.hw_pfc_map & mask) {
2093 /* Reduce the number of pfc TC with private buffer */
2094 priv->wl.low = 0;
2095 priv->enable = 0;
2096 priv->wl.high = 0;
2097 priv->buf_size = 0;
2098 pfc_priv_num--;
2099 }
2100
2101 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 pfc_priv_num == 0)
2103 break;
2104 }
2105
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108
2109 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 #define COMPENSATE_BUFFER 0x3C00
2113 #define COMPENSATE_HALF_MPS_NUM 5
2114 #define PRIV_WL_GAP 0x1800
2115
2116 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2117 u32 tc_num = hclge_get_tc_num(hdev);
2118 u32 half_mps = hdev->mps >> 1;
2119 u32 min_rx_priv;
2120 unsigned int i;
2121
2122 if (tc_num)
2123 rx_priv = rx_priv / tc_num;
2124
2125 if (tc_num <= NEED_RESERVE_TC_NUM)
2126 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2127
2128 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2129 COMPENSATE_HALF_MPS_NUM * half_mps;
2130 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2131 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2132
2133 if (rx_priv < min_rx_priv)
2134 return false;
2135
2136 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2137 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2138
2139 priv->enable = 0;
2140 priv->wl.low = 0;
2141 priv->wl.high = 0;
2142 priv->buf_size = 0;
2143
2144 if (!(hdev->hw_tc_map & BIT(i)))
2145 continue;
2146
2147 priv->enable = 1;
2148 priv->buf_size = rx_priv;
2149 priv->wl.high = rx_priv - hdev->dv_buf_size;
2150 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2151 }
2152
2153 buf_alloc->s_buf.buf_size = 0;
2154
2155 return true;
2156 }
2157
2158 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2159 * @hdev: pointer to struct hclge_dev
2160 * @buf_alloc: pointer to buffer calculation data
2161  * @return: 0: calculation successful, negative: fail
2162 */
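/* The strategies below are tried in order: private buffers only, private
 * plus shared buffer with the larger waterlines, the same with the smaller
 * waterlines, then dropping the private buffers of non-PFC TCs and finally
 * of PFC TCs until the remaining rx buffer fits.
 */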
2163 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2164 struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 /* When DCB is not supported, rx private buffer is not allocated. */
2167 if (!hnae3_dev_dcb_supported(hdev)) {
2168 u32 rx_all = hdev->pkt_buf_size;
2169
2170 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2171 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2172 return -ENOMEM;
2173
2174 return 0;
2175 }
2176
2177 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2178 return 0;
2179
2180 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2181 return 0;
2182
2183 /* try to decrease the buffer size */
2184 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2185 return 0;
2186
2187 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2188 return 0;
2189
2190 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2191 return 0;
2192
2193 return -ENOMEM;
2194 }
2195
2196 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2197 struct hclge_pkt_buf_alloc *buf_alloc)
2198 {
2199 struct hclge_rx_priv_buff_cmd *req;
2200 struct hclge_desc desc;
2201 int ret;
2202 int i;
2203
2204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2205 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2206
2207 /* Alloc private buffer TCs */
2208 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2209 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2210
2211 req->buf_num[i] =
2212 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2213 req->buf_num[i] |=
2214 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2215 }
2216
2217 req->shared_buf =
2218 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2219 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2220
2221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222 if (ret)
2223 dev_err(&hdev->pdev->dev,
2224 "rx private buffer alloc cmd failed %d\n", ret);
2225
2226 return ret;
2227 }
2228
2229 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2230 struct hclge_pkt_buf_alloc *buf_alloc)
2231 {
2232 struct hclge_rx_priv_wl_buf *req;
2233 struct hclge_priv_buf *priv;
2234 struct hclge_desc desc[2];
2235 int i, j;
2236 int ret;
2237
2238 for (i = 0; i < 2; i++) {
2239 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2240 false);
2241 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2242
2243 		/* The first descriptor sets the NEXT bit to 1 */
2244 if (i == 0)
2245 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2246 else
2247 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2248
2249 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2250 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2251
2252 priv = &buf_alloc->priv_buf[idx];
2253 req->tc_wl[j].high =
2254 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2255 req->tc_wl[j].high |=
2256 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2257 req->tc_wl[j].low =
2258 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2259 req->tc_wl[j].low |=
2260 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2261 }
2262 }
2263
2264 	/* Send 2 descriptors at one time */
2265 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2266 if (ret)
2267 dev_err(&hdev->pdev->dev,
2268 "rx private waterline config cmd failed %d\n",
2269 ret);
2270 return ret;
2271 }
2272
2273 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2274 struct hclge_pkt_buf_alloc *buf_alloc)
2275 {
2276 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2277 struct hclge_rx_com_thrd *req;
2278 struct hclge_desc desc[2];
2279 struct hclge_tc_thrd *tc;
2280 int i, j;
2281 int ret;
2282
2283 for (i = 0; i < 2; i++) {
2284 hclge_cmd_setup_basic_desc(&desc[i],
2285 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2286 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2287
2288 		/* The first descriptor sets the NEXT bit to 1 */
2289 if (i == 0)
2290 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2291 else
2292 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2293
2294 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2295 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2296
2297 req->com_thrd[j].high =
2298 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2299 req->com_thrd[j].high |=
2300 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2301 req->com_thrd[j].low =
2302 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2303 req->com_thrd[j].low |=
2304 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2305 }
2306 }
2307
2308 /* Send 2 descriptors at one time */
2309 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2310 if (ret)
2311 dev_err(&hdev->pdev->dev,
2312 "common threshold config cmd failed %d\n", ret);
2313 return ret;
2314 }
2315
2316 static int hclge_common_wl_config(struct hclge_dev *hdev,
2317 struct hclge_pkt_buf_alloc *buf_alloc)
2318 {
2319 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2320 struct hclge_rx_com_wl *req;
2321 struct hclge_desc desc;
2322 int ret;
2323
2324 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2325
2326 req = (struct hclge_rx_com_wl *)desc.data;
2327 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2328 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2329
2330 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2331 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2332
2333 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2334 if (ret)
2335 dev_err(&hdev->pdev->dev,
2336 "common waterline config cmd failed %d\n", ret);
2337
2338 return ret;
2339 }
2340
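/* Note: hclge_buffer_alloc() runs the whole packet buffer setup: tx buffer
 * calculation and allocation, rx private buffer calculation and allocation,
 * and, on DCB capable devices, the per-TC waterline and common threshold
 * configuration, followed by the common waterline.
 */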
2341 int hclge_buffer_alloc(struct hclge_dev *hdev)
2342 {
2343 struct hclge_pkt_buf_alloc *pkt_buf;
2344 int ret;
2345
2346 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2347 if (!pkt_buf)
2348 return -ENOMEM;
2349
2350 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2351 if (ret) {
2352 dev_err(&hdev->pdev->dev,
2353 "could not calc tx buffer size for all TCs %d\n", ret);
2354 goto out;
2355 }
2356
2357 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2358 if (ret) {
2359 dev_err(&hdev->pdev->dev,
2360 "could not alloc tx buffers %d\n", ret);
2361 goto out;
2362 }
2363
2364 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2365 if (ret) {
2366 dev_err(&hdev->pdev->dev,
2367 "could not calc rx priv buffer size for all TCs %d\n",
2368 ret);
2369 goto out;
2370 }
2371
2372 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2373 if (ret) {
2374 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2375 ret);
2376 goto out;
2377 }
2378
2379 if (hnae3_dev_dcb_supported(hdev)) {
2380 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2381 if (ret) {
2382 dev_err(&hdev->pdev->dev,
2383 "could not configure rx private waterline %d\n",
2384 ret);
2385 goto out;
2386 }
2387
2388 ret = hclge_common_thrd_config(hdev, pkt_buf);
2389 if (ret) {
2390 dev_err(&hdev->pdev->dev,
2391 "could not configure common threshold %d\n",
2392 ret);
2393 goto out;
2394 }
2395 }
2396
2397 ret = hclge_common_wl_config(hdev, pkt_buf);
2398 if (ret)
2399 dev_err(&hdev->pdev->dev,
2400 "could not configure common waterline %d\n", ret);
2401
2402 out:
2403 kfree(pkt_buf);
2404 return ret;
2405 }
2406
2407 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2408 {
2409 struct hnae3_handle *roce = &vport->roce;
2410 struct hnae3_handle *nic = &vport->nic;
2411
2412 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2413
2414 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2415 vport->back->num_msi_left == 0)
2416 return -EINVAL;
2417
2418 roce->rinfo.base_vector = vport->back->roce_base_vector;
2419
2420 roce->rinfo.netdev = nic->kinfo.netdev;
2421 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2422
2423 roce->pdev = nic->pdev;
2424 roce->ae_algo = nic->ae_algo;
2425 roce->numa_node_mask = nic->numa_node_mask;
2426
2427 return 0;
2428 }
2429
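/* Note: hclge_init_msi() requests up to hdev->num_msi MSI/MSI-X vectors but
 * accepts as few as HNAE3_MIN_VECTOR_NUM; the granted count becomes the new
 * num_msi and the per-vector bookkeeping arrays are sized accordingly.
 */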
2430 static int hclge_init_msi(struct hclge_dev *hdev)
2431 {
2432 struct pci_dev *pdev = hdev->pdev;
2433 int vectors;
2434 int i;
2435
2436 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2437 hdev->num_msi,
2438 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2439 if (vectors < 0) {
2440 dev_err(&pdev->dev,
2441 "failed(%d) to allocate MSI/MSI-X vectors\n",
2442 vectors);
2443 return vectors;
2444 }
2445 if (vectors < hdev->num_msi)
2446 dev_warn(&hdev->pdev->dev,
2447 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2448 hdev->num_msi, vectors);
2449
2450 hdev->num_msi = vectors;
2451 hdev->num_msi_left = vectors;
2452
2453 hdev->base_msi_vector = pdev->irq;
2454 hdev->roce_base_vector = hdev->base_msi_vector +
2455 hdev->roce_base_msix_offset;
2456
2457 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2458 sizeof(u16), GFP_KERNEL);
2459 if (!hdev->vector_status) {
2460 pci_free_irq_vectors(pdev);
2461 return -ENOMEM;
2462 }
2463
2464 for (i = 0; i < hdev->num_msi; i++)
2465 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2466
2467 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2468 sizeof(int), GFP_KERNEL);
2469 if (!hdev->vector_irq) {
2470 pci_free_irq_vectors(pdev);
2471 return -ENOMEM;
2472 }
2473
2474 return 0;
2475 }
2476
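/* Note: only 10M and 100M links may run half duplex; for every other speed
 * hclge_check_speed_dup() forces full duplex.
 */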
2477 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2478 {
2479 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2480 duplex = HCLGE_MAC_FULL;
2481
2482 return duplex;
2483 }
2484
2485 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2486 u8 duplex)
2487 {
2488 struct hclge_config_mac_speed_dup_cmd *req;
2489 struct hclge_desc desc;
2490 int ret;
2491
2492 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2493
2494 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2495
2496 if (duplex)
2497 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2498
2499 switch (speed) {
2500 case HCLGE_MAC_SPEED_10M:
2501 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2502 HCLGE_CFG_SPEED_S, 6);
2503 break;
2504 case HCLGE_MAC_SPEED_100M:
2505 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2506 HCLGE_CFG_SPEED_S, 7);
2507 break;
2508 case HCLGE_MAC_SPEED_1G:
2509 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2510 HCLGE_CFG_SPEED_S, 0);
2511 break;
2512 case HCLGE_MAC_SPEED_10G:
2513 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2514 HCLGE_CFG_SPEED_S, 1);
2515 break;
2516 case HCLGE_MAC_SPEED_25G:
2517 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2518 HCLGE_CFG_SPEED_S, 2);
2519 break;
2520 case HCLGE_MAC_SPEED_40G:
2521 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2522 HCLGE_CFG_SPEED_S, 3);
2523 break;
2524 case HCLGE_MAC_SPEED_50G:
2525 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526 HCLGE_CFG_SPEED_S, 4);
2527 break;
2528 case HCLGE_MAC_SPEED_100G:
2529 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530 HCLGE_CFG_SPEED_S, 5);
2531 break;
2532 case HCLGE_MAC_SPEED_200G:
2533 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534 HCLGE_CFG_SPEED_S, 8);
2535 break;
2536 default:
2537 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2538 return -EINVAL;
2539 }
2540
2541 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2542 1);
2543
2544 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2545 if (ret) {
2546 dev_err(&hdev->pdev->dev,
2547 "mac speed/duplex config cmd failed %d.\n", ret);
2548 return ret;
2549 }
2550
2551 return 0;
2552 }
2553
2554 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2555 {
2556 struct hclge_mac *mac = &hdev->hw.mac;
2557 int ret;
2558
2559 duplex = hclge_check_speed_dup(duplex, speed);
2560 if (!mac->support_autoneg && mac->speed == speed &&
2561 mac->duplex == duplex)
2562 return 0;
2563
2564 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2565 if (ret)
2566 return ret;
2567
2568 hdev->hw.mac.speed = speed;
2569 hdev->hw.mac.duplex = duplex;
2570
2571 return 0;
2572 }
2573
2574 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2575 u8 duplex)
2576 {
2577 struct hclge_vport *vport = hclge_get_vport(handle);
2578 struct hclge_dev *hdev = vport->back;
2579
2580 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2581 }
2582
2583 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2584 {
2585 struct hclge_config_auto_neg_cmd *req;
2586 struct hclge_desc desc;
2587 u32 flag = 0;
2588 int ret;
2589
2590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2591
2592 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2593 if (enable)
2594 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2595 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2596
2597 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2598 if (ret)
2599 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2600 ret);
2601
2602 return ret;
2603 }
2604
2605 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2606 {
2607 struct hclge_vport *vport = hclge_get_vport(handle);
2608 struct hclge_dev *hdev = vport->back;
2609
2610 if (!hdev->hw.mac.support_autoneg) {
2611 if (enable) {
2612 dev_err(&hdev->pdev->dev,
2613 "autoneg is not supported by current port\n");
2614 return -EOPNOTSUPP;
2615 } else {
2616 return 0;
2617 }
2618 }
2619
2620 return hclge_set_autoneg_en(hdev, enable);
2621 }
2622
2623 static int hclge_get_autoneg(struct hnae3_handle *handle)
2624 {
2625 struct hclge_vport *vport = hclge_get_vport(handle);
2626 struct hclge_dev *hdev = vport->back;
2627 struct phy_device *phydev = hdev->hw.mac.phydev;
2628
2629 if (phydev)
2630 return phydev->autoneg;
2631
2632 return hdev->hw.mac.autoneg;
2633 }
2634
2635 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2636 {
2637 struct hclge_vport *vport = hclge_get_vport(handle);
2638 struct hclge_dev *hdev = vport->back;
2639 int ret;
2640
2641 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2642
2643 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2644 if (ret)
2645 return ret;
2646 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2647 }
2648
2649 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2650 {
2651 struct hclge_vport *vport = hclge_get_vport(handle);
2652 struct hclge_dev *hdev = vport->back;
2653
2654 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2655 return hclge_set_autoneg_en(hdev, !halt);
2656
2657 return 0;
2658 }
2659
2660 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2661 {
2662 struct hclge_config_fec_cmd *req;
2663 struct hclge_desc desc;
2664 int ret;
2665
2666 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2667
2668 req = (struct hclge_config_fec_cmd *)desc.data;
2669 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2670 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2671 if (fec_mode & BIT(HNAE3_FEC_RS))
2672 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2673 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2674 if (fec_mode & BIT(HNAE3_FEC_BASER))
2675 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2676 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2677
2678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2679 if (ret)
2680 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2681
2682 return ret;
2683 }
2684
2685 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2686 {
2687 struct hclge_vport *vport = hclge_get_vport(handle);
2688 struct hclge_dev *hdev = vport->back;
2689 struct hclge_mac *mac = &hdev->hw.mac;
2690 int ret;
2691
2692 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2693 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2694 return -EINVAL;
2695 }
2696
2697 ret = hclge_set_fec_hw(hdev, fec_mode);
2698 if (ret)
2699 return ret;
2700
2701 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2702 return 0;
2703 }
2704
2705 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2706 u8 *fec_mode)
2707 {
2708 struct hclge_vport *vport = hclge_get_vport(handle);
2709 struct hclge_dev *hdev = vport->back;
2710 struct hclge_mac *mac = &hdev->hw.mac;
2711
2712 if (fec_ability)
2713 *fec_ability = mac->fec_ability;
2714 if (fec_mode)
2715 *fec_mode = mac->fec_mode;
2716 }
2717
2718 static int hclge_mac_init(struct hclge_dev *hdev)
2719 {
2720 struct hclge_mac *mac = &hdev->hw.mac;
2721 int ret;
2722
2723 hdev->support_sfp_query = true;
2724 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2725 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2726 hdev->hw.mac.duplex);
2727 if (ret)
2728 return ret;
2729
2730 if (hdev->hw.mac.support_autoneg) {
2731 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2732 if (ret)
2733 return ret;
2734 }
2735
2736 mac->link = 0;
2737
2738 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2739 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2740 if (ret)
2741 return ret;
2742 }
2743
2744 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2745 if (ret) {
2746 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2747 return ret;
2748 }
2749
2750 ret = hclge_set_default_loopback(hdev);
2751 if (ret)
2752 return ret;
2753
2754 ret = hclge_buffer_alloc(hdev);
2755 if (ret)
2756 dev_err(&hdev->pdev->dev,
2757 "allocate buffer fail, ret=%d\n", ret);
2758
2759 return ret;
2760 }
2761
2762 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2763 {
2764 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2765 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2766 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2767 hclge_wq, &hdev->service_task, 0);
2768 }
2769
2770 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2771 {
2772 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2773 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2774 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2775 hclge_wq, &hdev->service_task, 0);
2776 }
2777
2778 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2779 {
2780 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2781 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2782 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2783 hclge_wq, &hdev->service_task,
2784 delay_time);
2785 }
2786
2787 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2788 {
2789 struct hclge_link_status_cmd *req;
2790 struct hclge_desc desc;
2791 int ret;
2792
2793 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2795 if (ret) {
2796 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2797 ret);
2798 return ret;
2799 }
2800
2801 req = (struct hclge_link_status_cmd *)desc.data;
2802 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2803 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2804
2805 return 0;
2806 }
2807
2808 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2809 {
2810 struct phy_device *phydev = hdev->hw.mac.phydev;
2811
2812 *link_status = HCLGE_LINK_STATUS_DOWN;
2813
2814 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2815 return 0;
2816
2817 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2818 return 0;
2819
2820 return hclge_get_mac_link_status(hdev, link_status);
2821 }
2822
2823 static void hclge_update_link_status(struct hclge_dev *hdev)
2824 {
2825 struct hnae3_client *rclient = hdev->roce_client;
2826 struct hnae3_client *client = hdev->nic_client;
2827 struct hnae3_handle *rhandle;
2828 struct hnae3_handle *handle;
2829 int state;
2830 int ret;
2831 int i;
2832
2833 if (!client)
2834 return;
2835
2836 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2837 return;
2838
2839 ret = hclge_get_mac_phy_link(hdev, &state);
2840 if (ret) {
2841 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2842 return;
2843 }
2844
2845 if (state != hdev->hw.mac.link) {
2846 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2847 handle = &hdev->vport[i].nic;
2848 client->ops->link_status_change(handle, state);
2849 hclge_config_mac_tnl_int(hdev, state);
2850 rhandle = &hdev->vport[i].roce;
2851 if (rclient && rclient->ops->link_status_change)
2852 rclient->ops->link_status_change(rhandle,
2853 state);
2854 }
2855 hdev->hw.mac.link = state;
2856 }
2857
2858 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2859 }
2860
2861 static void hclge_update_port_capability(struct hclge_mac *mac)
2862 {
2863 /* update fec ability by speed */
2864 hclge_convert_setting_fec(mac);
2865
2866 	/* firmware cannot identify the backplane type; the media type
2867 	 * read from the configuration helps to handle it
2868 	 */
2869 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2870 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2871 mac->module_type = HNAE3_MODULE_TYPE_KR;
2872 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873 mac->module_type = HNAE3_MODULE_TYPE_TP;
2874
2875 if (mac->support_autoneg) {
2876 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2877 linkmode_copy(mac->advertising, mac->supported);
2878 } else {
2879 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2880 mac->supported);
2881 linkmode_zero(mac->advertising);
2882 }
2883 }
2884
2885 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2886 {
2887 struct hclge_sfp_info_cmd *resp;
2888 struct hclge_desc desc;
2889 int ret;
2890
2891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2892 resp = (struct hclge_sfp_info_cmd *)desc.data;
2893 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2894 if (ret == -EOPNOTSUPP) {
2895 dev_warn(&hdev->pdev->dev,
2896 			 "IMP does not support get SFP speed %d\n", ret);
2897 return ret;
2898 } else if (ret) {
2899 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2900 return ret;
2901 }
2902
2903 *speed = le32_to_cpu(resp->speed);
2904
2905 return 0;
2906 }
2907
2908 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2909 {
2910 struct hclge_sfp_info_cmd *resp;
2911 struct hclge_desc desc;
2912 int ret;
2913
2914 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2915 resp = (struct hclge_sfp_info_cmd *)desc.data;
2916
2917 resp->query_type = QUERY_ACTIVE_SPEED;
2918
2919 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2920 if (ret == -EOPNOTSUPP) {
2921 dev_warn(&hdev->pdev->dev,
2922 "IMP does not support get SFP info %d\n", ret);
2923 return ret;
2924 } else if (ret) {
2925 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2926 return ret;
2927 }
2928
2929 	/* In some cases, the mac speed obtained from the IMP may be 0; it
2930 	 * should not be assigned to mac->speed.
2931 	 */
2932 if (!le32_to_cpu(resp->speed))
2933 return 0;
2934
2935 mac->speed = le32_to_cpu(resp->speed);
2936 	/* if resp->speed_ability is 0, the firmware is an old version,
2937 	 * so do not update these params
2938 	 */
2939 if (resp->speed_ability) {
2940 mac->module_type = le32_to_cpu(resp->module_type);
2941 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2942 mac->autoneg = resp->autoneg;
2943 mac->support_autoneg = resp->autoneg_ability;
2944 mac->speed_type = QUERY_ACTIVE_SPEED;
2945 if (!resp->active_fec)
2946 mac->fec_mode = 0;
2947 else
2948 mac->fec_mode = BIT(resp->active_fec);
2949 } else {
2950 mac->speed_type = QUERY_SFP_SPEED;
2951 }
2952
2953 return 0;
2954 }
2955
2956 static int hclge_update_port_info(struct hclge_dev *hdev)
2957 {
2958 struct hclge_mac *mac = &hdev->hw.mac;
2959 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2960 int ret;
2961
2962 /* get the port info from SFP cmd if not copper port */
2963 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2964 return 0;
2965
2966 	/* if the IMP does not support querying SFP/qSFP info, return directly */
2967 if (!hdev->support_sfp_query)
2968 return 0;
2969
2970 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2971 ret = hclge_get_sfp_info(hdev, mac);
2972 else
2973 ret = hclge_get_sfp_speed(hdev, &speed);
2974
2975 if (ret == -EOPNOTSUPP) {
2976 hdev->support_sfp_query = false;
2977 return ret;
2978 } else if (ret) {
2979 return ret;
2980 }
2981
2982 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2983 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2984 hclge_update_port_capability(mac);
2985 return 0;
2986 }
2987 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2988 HCLGE_MAC_FULL);
2989 } else {
2990 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2991 return 0; /* do nothing if no SFP */
2992
2993 /* must config full duplex for SFP */
2994 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2995 }
2996 }
2997
2998 static int hclge_get_status(struct hnae3_handle *handle)
2999 {
3000 struct hclge_vport *vport = hclge_get_vport(handle);
3001 struct hclge_dev *hdev = vport->back;
3002
3003 hclge_update_link_status(hdev);
3004
3005 return hdev->hw.mac.link;
3006 }
3007
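/* Note: vport[0] belongs to the PF itself, so VF n is found at
 * vport[n + HCLGE_VF_VPORT_START_NUM]; the helper also rejects VF ids
 * outside the range currently enabled via SR-IOV.
 */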
3008 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3009 {
3010 if (!pci_num_vf(hdev->pdev)) {
3011 dev_err(&hdev->pdev->dev,
3012 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3013 return NULL;
3014 }
3015
3016 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3017 dev_err(&hdev->pdev->dev,
3018 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3019 vf, pci_num_vf(hdev->pdev));
3020 return NULL;
3021 }
3022
3023 	/* VF vport indices start from 1 */
3024 vf += HCLGE_VF_VPORT_START_NUM;
3025 return &hdev->vport[vf];
3026 }
3027
3028 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3029 struct ifla_vf_info *ivf)
3030 {
3031 struct hclge_vport *vport = hclge_get_vport(handle);
3032 struct hclge_dev *hdev = vport->back;
3033
3034 vport = hclge_get_vf_vport(hdev, vf);
3035 if (!vport)
3036 return -EINVAL;
3037
3038 ivf->vf = vf;
3039 ivf->linkstate = vport->vf_info.link_state;
3040 ivf->spoofchk = vport->vf_info.spoofchk;
3041 ivf->trusted = vport->vf_info.trusted;
3042 ivf->min_tx_rate = 0;
3043 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3044 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3045 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3046 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3047 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3048
3049 return 0;
3050 }
3051
3052 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3053 int link_state)
3054 {
3055 struct hclge_vport *vport = hclge_get_vport(handle);
3056 struct hclge_dev *hdev = vport->back;
3057
3058 vport = hclge_get_vf_vport(hdev, vf);
3059 if (!vport)
3060 return -EINVAL;
3061
3062 vport->vf_info.link_state = link_state;
3063
3064 return 0;
3065 }
3066
3067 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3068 {
3069 u32 cmdq_src_reg, msix_src_reg;
3070
3071 /* fetch the events from their corresponding regs */
3072 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3073 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3074
3075 	/* Assumption: if by any chance reset and mailbox events are reported
3076 	 * together, we will only process the reset event in this go and will
3077 	 * defer the processing of the mailbox events. Since we would not have
3078 	 * cleared the RX CMDQ event this time, we would receive another
3079 	 * interrupt from H/W just for the mailbox.
3080 	 *
3081 	 * check for vector0 reset event sources
3082 	 */
3083 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3084 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3085 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3086 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3087 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3088 hdev->rst_stats.imp_rst_cnt++;
3089 return HCLGE_VECTOR0_EVENT_RST;
3090 }
3091
3092 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3093 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3094 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3095 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3096 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3097 hdev->rst_stats.global_rst_cnt++;
3098 return HCLGE_VECTOR0_EVENT_RST;
3099 }
3100
3101 /* check for vector0 msix event source */
3102 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3103 *clearval = msix_src_reg;
3104 return HCLGE_VECTOR0_EVENT_ERR;
3105 }
3106
3107 /* check for vector0 mailbox(=CMDQ RX) event source */
3108 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3109 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3110 *clearval = cmdq_src_reg;
3111 return HCLGE_VECTOR0_EVENT_MBX;
3112 }
3113
3114 /* print other vector0 event source */
3115 dev_info(&hdev->pdev->dev,
3116 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3117 cmdq_src_reg, msix_src_reg);
3118 *clearval = msix_src_reg;
3119
3120 return HCLGE_VECTOR0_EVENT_OTHER;
3121 }
3122
3123 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3124 u32 regclr)
3125 {
3126 switch (event_type) {
3127 case HCLGE_VECTOR0_EVENT_RST:
3128 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3129 break;
3130 case HCLGE_VECTOR0_EVENT_MBX:
3131 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3132 break;
3133 default:
3134 break;
3135 }
3136 }
3137
3138 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3139 {
3140 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3141 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3142 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3143 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3144 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3145 }
3146
3147 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3148 {
3149 writel(enable ? 1 : 0, vector->addr);
3150 }
3151
3152 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3153 {
3154 struct hclge_dev *hdev = data;
3155 u32 clearval = 0;
3156 u32 event_cause;
3157
3158 hclge_enable_vector(&hdev->misc_vector, false);
3159 event_cause = hclge_check_event_cause(hdev, &clearval);
3160
3161 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3162 switch (event_cause) {
3163 case HCLGE_VECTOR0_EVENT_ERR:
3164 		/* we do not know what type of reset is required now. This could
3165 		 * only be decided after we fetch the type of errors which
3166 		 * caused this event. Therefore, we will do the following for now:
3167 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
3168 		 *    type of reset to be used is deferred.
3169 		 * 2. Schedule the reset service task.
3170 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3171 		 *    will fetch the correct type of reset. This is done by
3172 		 *    first decoding the types of errors.
3173 		 */
3174 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3175 fallthrough;
3176 case HCLGE_VECTOR0_EVENT_RST:
3177 hclge_reset_task_schedule(hdev);
3178 break;
3179 case HCLGE_VECTOR0_EVENT_MBX:
3180 /* If we are here then,
3181 * 1. Either we are not handling any mbx task and we are not
3182 * scheduled as well
3183 * OR
3184 * 2. We could be handling a mbx task but nothing more is
3185 * scheduled.
3186 * In both cases, we should schedule mbx task as there are more
3187 * mbx messages reported by this interrupt.
3188 */
3189 hclge_mbx_task_schedule(hdev);
3190 break;
3191 default:
3192 dev_warn(&hdev->pdev->dev,
3193 "received unknown or unhandled event of vector0\n");
3194 break;
3195 }
3196
3197 hclge_clear_event_cause(hdev, event_cause, clearval);
3198
3199 	/* Enable the interrupt if it is not caused by reset. When clearval
3200 	 * equals 0, the interrupt status may have been cleared by hardware
3201 	 * before the driver read the status register; in that case the
3202 	 * vector0 interrupt should also be re-enabled.
3203 	 */
3204 if (!clearval ||
3205 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3206 hclge_enable_vector(&hdev->misc_vector, true);
3207 }
3208
3209 return IRQ_HANDLED;
3210 }
3211
3212 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3213 {
3214 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3215 dev_warn(&hdev->pdev->dev,
3216 "vector(vector_id %d) has been freed.\n", vector_id);
3217 return;
3218 }
3219
3220 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3221 hdev->num_msi_left += 1;
3222 hdev->num_msi_used -= 1;
3223 }
3224
3225 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3226 {
3227 struct hclge_misc_vector *vector = &hdev->misc_vector;
3228
3229 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3230
3231 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3232 hdev->vector_status[0] = 0;
3233
3234 hdev->num_msi_left -= 1;
3235 hdev->num_msi_used += 1;
3236 }
3237
3238 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3239 const cpumask_t *mask)
3240 {
3241 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3242 affinity_notify);
3243
3244 cpumask_copy(&hdev->affinity_mask, mask);
3245 }
3246
3247 static void hclge_irq_affinity_release(struct kref *ref)
3248 {
3249 }
3250
3251 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3252 {
3253 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3254 &hdev->affinity_mask);
3255
3256 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3257 hdev->affinity_notify.release = hclge_irq_affinity_release;
3258 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3259 &hdev->affinity_notify);
3260 }
3261
3262 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3263 {
3264 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3265 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3266 }
3267
3268 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3269 {
3270 int ret;
3271
3272 hclge_get_misc_vector(hdev);
3273
3274 /* this would be explicitly freed in the end */
3275 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3276 HCLGE_NAME, pci_name(hdev->pdev));
3277 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3278 0, hdev->misc_vector.name, hdev);
3279 if (ret) {
3280 hclge_free_vector(hdev, 0);
3281 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3282 hdev->misc_vector.vector_irq);
3283 }
3284
3285 return ret;
3286 }
3287
3288 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3289 {
3290 free_irq(hdev->misc_vector.vector_irq, hdev);
3291 hclge_free_vector(hdev, 0);
3292 }
3293
3294 int hclge_notify_client(struct hclge_dev *hdev,
3295 enum hnae3_reset_notify_type type)
3296 {
3297 struct hnae3_client *client = hdev->nic_client;
3298 u16 i;
3299
3300 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3301 return 0;
3302
3303 if (!client->ops->reset_notify)
3304 return -EOPNOTSUPP;
3305
3306 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3307 struct hnae3_handle *handle = &hdev->vport[i].nic;
3308 int ret;
3309
3310 ret = client->ops->reset_notify(handle, type);
3311 if (ret) {
3312 dev_err(&hdev->pdev->dev,
3313 "notify nic client failed %d(%d)\n", type, ret);
3314 return ret;
3315 }
3316 }
3317
3318 return 0;
3319 }
3320
3321 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3322 enum hnae3_reset_notify_type type)
3323 {
3324 struct hnae3_client *client = hdev->roce_client;
3325 int ret;
3326 u16 i;
3327
3328 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3329 return 0;
3330
3331 if (!client->ops->reset_notify)
3332 return -EOPNOTSUPP;
3333
3334 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3335 struct hnae3_handle *handle = &hdev->vport[i].roce;
3336
3337 ret = client->ops->reset_notify(handle, type);
3338 if (ret) {
3339 dev_err(&hdev->pdev->dev,
3340 "notify roce client failed %d(%d)",
3341 type, ret);
3342 return ret;
3343 }
3344 }
3345
3346 return ret;
3347 }
3348
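/* Note: hclge_reset_wait() polls the reset status bit of the selected
 * register every HCLGE_RESET_WATI_MS (100 ms) for up to HCLGE_RESET_WAIT_CNT
 * (350) iterations, i.e. roughly 35 seconds, before giving up with -EBUSY.
 */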
3349 static int hclge_reset_wait(struct hclge_dev *hdev)
3350 {
3351 #define HCLGE_RESET_WATI_MS 100
3352 #define HCLGE_RESET_WAIT_CNT 350
3353
3354 u32 val, reg, reg_bit;
3355 u32 cnt = 0;
3356
3357 switch (hdev->reset_type) {
3358 case HNAE3_IMP_RESET:
3359 reg = HCLGE_GLOBAL_RESET_REG;
3360 reg_bit = HCLGE_IMP_RESET_BIT;
3361 break;
3362 case HNAE3_GLOBAL_RESET:
3363 reg = HCLGE_GLOBAL_RESET_REG;
3364 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3365 break;
3366 case HNAE3_FUNC_RESET:
3367 reg = HCLGE_FUN_RST_ING;
3368 reg_bit = HCLGE_FUN_RST_ING_B;
3369 break;
3370 default:
3371 dev_err(&hdev->pdev->dev,
3372 "Wait for unsupported reset type: %d\n",
3373 hdev->reset_type);
3374 return -EINVAL;
3375 }
3376
3377 val = hclge_read_dev(&hdev->hw, reg);
3378 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3379 msleep(HCLGE_RESET_WATI_MS);
3380 val = hclge_read_dev(&hdev->hw, reg);
3381 cnt++;
3382 }
3383
3384 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3385 dev_warn(&hdev->pdev->dev,
3386 "Wait for reset timeout: %d\n", hdev->reset_type);
3387 return -EBUSY;
3388 }
3389
3390 return 0;
3391 }
3392
3393 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3394 {
3395 struct hclge_vf_rst_cmd *req;
3396 struct hclge_desc desc;
3397
3398 req = (struct hclge_vf_rst_cmd *)desc.data;
3399 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3400 req->dest_vfid = func_id;
3401
3402 if (reset)
3403 req->vf_rst = 0x1;
3404
3405 return hclge_cmd_send(&hdev->hw, &desc, 1);
3406 }
3407
3408 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3409 {
3410 int i;
3411
3412 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3413 struct hclge_vport *vport = &hdev->vport[i];
3414 int ret;
3415
3416 /* Send cmd to set/clear VF's FUNC_RST_ING */
3417 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3418 if (ret) {
3419 dev_err(&hdev->pdev->dev,
3420 "set vf(%u) rst failed %d!\n",
3421 vport->vport_id, ret);
3422 return ret;
3423 }
3424
3425 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3426 continue;
3427
3428 /* Inform VF to process the reset.
3429 * hclge_inform_reset_assert_to_vf may fail if VF
3430 * driver is not loaded.
3431 */
3432 ret = hclge_inform_reset_assert_to_vf(vport);
3433 if (ret)
3434 dev_warn(&hdev->pdev->dev,
3435 "inform reset to vf(%u) failed %d!\n",
3436 vport->vport_id, ret);
3437 }
3438
3439 return 0;
3440 }
3441
3442 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3443 {
3444 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3445 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3446 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3447 return;
3448
3449 hclge_mbx_handler(hdev);
3450
3451 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3452 }
3453
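/* Note: hclge_func_reset_sync_vf() keeps querying HCLGE_OPC_QUERY_VF_RST_RDY,
 * servicing pending mailbox work in between, until every VF reports ready,
 * retrying up to HCLGE_PF_RESET_SYNC_CNT times with an msleep of
 * HCLGE_PF_RESET_SYNC_TIME between attempts.
 */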
3454 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3455 {
3456 struct hclge_pf_rst_sync_cmd *req;
3457 struct hclge_desc desc;
3458 int cnt = 0;
3459 int ret;
3460
3461 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3463
3464 do {
3465 		/* VF needs to bring the netdev down via mbx during PF or FLR reset */
3466 hclge_mailbox_service_task(hdev);
3467
3468 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3469 		/* for compatibility with old firmware, wait
3470 		 * 100 ms for the VF to stop IO
3471 		 */
3472 if (ret == -EOPNOTSUPP) {
3473 msleep(HCLGE_RESET_SYNC_TIME);
3474 return;
3475 } else if (ret) {
3476 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3477 ret);
3478 return;
3479 } else if (req->all_vf_ready) {
3480 return;
3481 }
3482 msleep(HCLGE_PF_RESET_SYNC_TIME);
3483 hclge_cmd_reuse_desc(&desc, true);
3484 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3485
3486 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3487 }
3488
3489 void hclge_report_hw_error(struct hclge_dev *hdev,
3490 enum hnae3_hw_error_type type)
3491 {
3492 struct hnae3_client *client = hdev->nic_client;
3493 u16 i;
3494
3495 if (!client || !client->ops->process_hw_error ||
3496 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3497 return;
3498
3499 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3500 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3501 }
3502
3503 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3504 {
3505 u32 reg_val;
3506
3507 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3508 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3509 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3510 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3511 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3512 }
3513
3514 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3515 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3516 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3517 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3518 }
3519 }
3520
3521 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3522 {
3523 struct hclge_desc desc;
3524 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3525 int ret;
3526
3527 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3528 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3529 req->fun_reset_vfid = func_id;
3530
3531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3532 if (ret)
3533 dev_err(&hdev->pdev->dev,
3534 "send function reset cmd fail, status =%d\n", ret);
3535
3536 return ret;
3537 }
3538
3539 static void hclge_do_reset(struct hclge_dev *hdev)
3540 {
3541 struct hnae3_handle *handle = &hdev->vport[0].nic;
3542 struct pci_dev *pdev = hdev->pdev;
3543 u32 val;
3544
3545 if (hclge_get_hw_reset_stat(handle)) {
3546 dev_info(&pdev->dev, "hardware reset not finish\n");
3547 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3548 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3549 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3550 return;
3551 }
3552
3553 switch (hdev->reset_type) {
3554 case HNAE3_GLOBAL_RESET:
3555 dev_info(&pdev->dev, "global reset requested\n");
3556 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3557 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3558 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3559 break;
3560 case HNAE3_FUNC_RESET:
3561 dev_info(&pdev->dev, "PF reset requested\n");
3562 /* schedule again to check later */
3563 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3564 hclge_reset_task_schedule(hdev);
3565 break;
3566 default:
3567 dev_warn(&pdev->dev,
3568 "unsupported reset type: %d\n", hdev->reset_type);
3569 break;
3570 }
3571 }
3572
3573 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3574 unsigned long *addr)
3575 {
3576 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3577 struct hclge_dev *hdev = ae_dev->priv;
3578
3579 /* first, resolve any unknown reset type to the known type(s) */
3580 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3581 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3582 HCLGE_MISC_VECTOR_INT_STS);
3583 /* we will intentionally ignore any errors from this function
3584 * as we will end up in *some* reset request in any case
3585 */
3586 if (hclge_handle_hw_msix_error(hdev, addr))
3587 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3588 msix_sts_reg);
3589
3590 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3591 /* We deferred clearing the error event which caused the
3592 * interrupt, since it was not possible to do that in
3593 * interrupt context (this is the reason we introduced the
3594 * new UNKNOWN reset type). Now that the errors have been
3595 * handled and cleared in hardware, we can safely enable
3596 * interrupts. This is an exception to the norm.
3597 */
3598 hclge_enable_vector(&hdev->misc_vector, true);
3599 }
3600
3601 /* return the highest priority reset level amongst all */
3602 if (test_bit(HNAE3_IMP_RESET, addr)) {
3603 rst_level = HNAE3_IMP_RESET;
3604 clear_bit(HNAE3_IMP_RESET, addr);
3605 clear_bit(HNAE3_GLOBAL_RESET, addr);
3606 clear_bit(HNAE3_FUNC_RESET, addr);
3607 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3608 rst_level = HNAE3_GLOBAL_RESET;
3609 clear_bit(HNAE3_GLOBAL_RESET, addr);
3610 clear_bit(HNAE3_FUNC_RESET, addr);
3611 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3612 rst_level = HNAE3_FUNC_RESET;
3613 clear_bit(HNAE3_FUNC_RESET, addr);
3614 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3615 rst_level = HNAE3_FLR_RESET;
3616 clear_bit(HNAE3_FLR_RESET, addr);
3617 }
3618
3619 if (hdev->reset_type != HNAE3_NONE_RESET &&
3620 rst_level < hdev->reset_type)
3621 return HNAE3_NONE_RESET;
3622
3623 return rst_level;
3624 }
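
/* A minimal usage sketch (not part of the driver; the function name and
 * local variable are illustrative only): it shows how the priority
 * resolution above is driven by a bitmap of pending reset types. When both
 * a global and a function reset are pending, the resolver picks the
 * higher-priority HNAE3_GLOBAL_RESET and clears the lower-priority
 * HNAE3_FUNC_RESET bit as well, so the same event is not handled twice.
 * The returned level may still be HNAE3_NONE_RESET if a higher-priority
 * reset is already in progress.
 */
static enum hnae3_reset_type __maybe_unused
hclge_example_resolve_reset(struct hnae3_ae_dev *ae_dev)
{
	unsigned long pending = 0;

	set_bit(HNAE3_FUNC_RESET, &pending);
	set_bit(HNAE3_GLOBAL_RESET, &pending);

	/* typically HNAE3_GLOBAL_RESET; 'pending' is left empty */
	return hclge_get_reset_level(ae_dev, &pending);
}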
3625
3626 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3627 {
3628 u32 clearval = 0;
3629
3630 switch (hdev->reset_type) {
3631 case HNAE3_IMP_RESET:
3632 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3633 break;
3634 case HNAE3_GLOBAL_RESET:
3635 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3636 break;
3637 default:
3638 break;
3639 }
3640
3641 if (!clearval)
3642 return;
3643
3644 /* For revision 0x20, the reset interrupt source
3645 * can only be cleared after the hardware reset is done
3646 */
3647 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3648 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3649 clearval);
3650
3651 hclge_enable_vector(&hdev->misc_vector, true);
3652 }
3653
3654 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3655 {
3656 u32 reg_val;
3657
3658 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3659 if (enable)
3660 reg_val |= HCLGE_NIC_SW_RST_RDY;
3661 else
3662 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3663
3664 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3665 }
3666
3667 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3668 {
3669 int ret;
3670
3671 ret = hclge_set_all_vf_rst(hdev, true);
3672 if (ret)
3673 return ret;
3674
3675 hclge_func_reset_sync_vf(hdev);
3676
3677 return 0;
3678 }
3679
3680 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3681 {
3682 u32 reg_val;
3683 int ret = 0;
3684
3685 switch (hdev->reset_type) {
3686 case HNAE3_FUNC_RESET:
3687 ret = hclge_func_reset_notify_vf(hdev);
3688 if (ret)
3689 return ret;
3690
3691 ret = hclge_func_reset_cmd(hdev, 0);
3692 if (ret) {
3693 dev_err(&hdev->pdev->dev,
3694 "asserting function reset fail %d!\n", ret);
3695 return ret;
3696 }
3697
3698 /* After performing PF reset, it is not necessary to do the
3699 * mailbox handling or send any command to firmware, because
3700 * any mailbox handling or command to firmware is only valid
3701 * after hclge_cmd_init is called.
3702 */
3703 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3704 hdev->rst_stats.pf_rst_cnt++;
3705 break;
3706 case HNAE3_FLR_RESET:
3707 ret = hclge_func_reset_notify_vf(hdev);
3708 if (ret)
3709 return ret;
3710 break;
3711 case HNAE3_IMP_RESET:
3712 hclge_handle_imp_error(hdev);
3713 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3714 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3715 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3716 break;
3717 default:
3718 break;
3719 }
3720
3721 /* inform hardware that preparatory work is done */
3722 msleep(HCLGE_RESET_SYNC_TIME);
3723 hclge_reset_handshake(hdev, true);
3724 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3725
3726 return ret;
3727 }
3728
3729 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3730 {
3731 #define MAX_RESET_FAIL_CNT 5
3732
3733 if (hdev->reset_pending) {
3734 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3735 hdev->reset_pending);
3736 return true;
3737 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3738 HCLGE_RESET_INT_M) {
3739 dev_info(&hdev->pdev->dev,
3740 "reset failed because new reset interrupt\n");
3741 hclge_clear_reset_cause(hdev);
3742 return false;
3743 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3744 hdev->rst_stats.reset_fail_cnt++;
3745 set_bit(hdev->reset_type, &hdev->reset_pending);
3746 dev_info(&hdev->pdev->dev,
3747 "re-schedule reset task(%u)\n",
3748 hdev->rst_stats.reset_fail_cnt);
3749 return true;
3750 }
3751
3752 hclge_clear_reset_cause(hdev);
3753
3754 /* recover the handshake status when reset fails */
3755 hclge_reset_handshake(hdev, true);
3756
3757 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3758
3759 hclge_dbg_dump_rst_info(hdev);
3760
3761 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3762
3763 return false;
3764 }
3765
3766 static int hclge_set_rst_done(struct hclge_dev *hdev)
3767 {
3768 struct hclge_pf_rst_done_cmd *req;
3769 struct hclge_desc desc;
3770 int ret;
3771
3772 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3773 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3774 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3775
3776 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3777 /* To be compatible with the old firmware, which does not support
3778 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3779 * return success
3780 */
3781 if (ret == -EOPNOTSUPP) {
3782 dev_warn(&hdev->pdev->dev,
3783 "current firmware does not support command(0x%x)!\n",
3784 HCLGE_OPC_PF_RST_DONE);
3785 return 0;
3786 } else if (ret) {
3787 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3788 ret);
3789 }
3790
3791 return ret;
3792 }
3793
3794 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3795 {
3796 int ret = 0;
3797
3798 switch (hdev->reset_type) {
3799 case HNAE3_FUNC_RESET:
3800 case HNAE3_FLR_RESET:
3801 ret = hclge_set_all_vf_rst(hdev, false);
3802 break;
3803 case HNAE3_GLOBAL_RESET:
3804 case HNAE3_IMP_RESET:
3805 ret = hclge_set_rst_done(hdev);
3806 break;
3807 default:
3808 break;
3809 }
3810
3811 /* clear the handshake status after re-initialization is done */
3812 hclge_reset_handshake(hdev, false);
3813
3814 return ret;
3815 }
3816
3817 static int hclge_reset_stack(struct hclge_dev *hdev)
3818 {
3819 int ret;
3820
3821 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3822 if (ret)
3823 return ret;
3824
3825 ret = hclge_reset_ae_dev(hdev->ae_dev);
3826 if (ret)
3827 return ret;
3828
3829 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3830 }
3831
3832 static int hclge_reset_prepare(struct hclge_dev *hdev)
3833 {
3834 int ret;
3835
3836 hdev->rst_stats.reset_cnt++;
3837 /* perform reset of the stack & ae device for a client */
3838 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3839 if (ret)
3840 return ret;
3841
3842 rtnl_lock();
3843 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3844 rtnl_unlock();
3845 if (ret)
3846 return ret;
3847
3848 return hclge_reset_prepare_wait(hdev);
3849 }
3850
3851 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3852 {
3853 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3854 enum hnae3_reset_type reset_level;
3855 int ret;
3856
3857 hdev->rst_stats.hw_reset_done_cnt++;
3858
3859 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3860 if (ret)
3861 return ret;
3862
3863 rtnl_lock();
3864 ret = hclge_reset_stack(hdev);
3865 rtnl_unlock();
3866 if (ret)
3867 return ret;
3868
3869 hclge_clear_reset_cause(hdev);
3870
3871 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3872 /* ignore the RoCE notify error once the reset has already failed
3873 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3874 */
3875 if (ret &&
3876 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3877 return ret;
3878
3879 ret = hclge_reset_prepare_up(hdev);
3880 if (ret)
3881 return ret;
3882
3883 rtnl_lock();
3884 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3885 rtnl_unlock();
3886 if (ret)
3887 return ret;
3888
3889 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3890 if (ret)
3891 return ret;
3892
3893 hdev->last_reset_time = jiffies;
3894 hdev->rst_stats.reset_fail_cnt = 0;
3895 hdev->rst_stats.reset_done_cnt++;
3896 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3897
3898 /* If default_reset_request has a higher level reset request,
3899 * it should be handled as soon as possible, since some errors
3900 * need this kind of reset to be fixed.
3901 */
3902 reset_level = hclge_get_reset_level(ae_dev,
3903 &hdev->default_reset_request);
3904 if (reset_level != HNAE3_NONE_RESET)
3905 set_bit(reset_level, &hdev->reset_request);
3906
3907 return 0;
3908 }
3909
3910 static void hclge_reset(struct hclge_dev *hdev)
3911 {
3912 if (hclge_reset_prepare(hdev))
3913 goto err_reset;
3914
3915 if (hclge_reset_wait(hdev))
3916 goto err_reset;
3917
3918 if (hclge_reset_rebuild(hdev))
3919 goto err_reset;
3920
3921 return;
3922
3923 err_reset:
3924 if (hclge_reset_err_handle(hdev))
3925 hclge_reset_task_schedule(hdev);
3926 }
3927
3928 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3929 {
3930 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3931 struct hclge_dev *hdev = ae_dev->priv;
3932
3933 /* We might end up getting called broadly because of the two cases below:
3934 * 1. A recoverable error was conveyed through APEI and the only way to
3935 * restore normalcy is to reset.
3936 * 2. A new reset request from the stack due to a timeout.
3937 *
3938 * For the first case, the error event might not have an ae handle
3939 * available. Check if this is a new reset request and we are not here
3940 * just because the last reset attempt did not succeed and the watchdog
3941 * hit us again. We will know this if the last reset request did not
3942 * occur very recently (watchdog timer = 5*HZ, so check after a
3943 * sufficiently large time, say 4*5*HZ). For a new request we reset
3944 * the "reset level" to PF reset. If it is a repeat of the most recent
3945 * request, we want to throttle it, so we will not allow another reset
3946 * before HCLGE_RESET_INTERVAL has elapsed.
3947 */
3948 if (!handle)
3949 handle = &hdev->vport[0].nic;
3950
3951 if (time_before(jiffies, (hdev->last_reset_time +
3952 HCLGE_RESET_INTERVAL))) {
3953 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3954 return;
3955 } else if (hdev->default_reset_request) {
3956 hdev->reset_level =
3957 hclge_get_reset_level(ae_dev,
3958 &hdev->default_reset_request);
3959 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3960 hdev->reset_level = HNAE3_FUNC_RESET;
3961 }
3962
3963 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3964 hdev->reset_level);
3965
3966 /* request reset & schedule reset task */
3967 set_bit(hdev->reset_level, &hdev->reset_request);
3968 hclge_reset_task_schedule(hdev);
3969
3970 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3971 hdev->reset_level++;
3972 }
3973
3974 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3975 enum hnae3_reset_type rst_type)
3976 {
3977 struct hclge_dev *hdev = ae_dev->priv;
3978
3979 set_bit(rst_type, &hdev->default_reset_request);
3980 }
3981
3982 static void hclge_reset_timer(struct timer_list *t)
3983 {
3984 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3985
3986 /* if default_reset_request has no value, it means that this reset
3987 * request has already been handled, so just return here
3988 */
3989 if (!hdev->default_reset_request)
3990 return;
3991
3992 dev_info(&hdev->pdev->dev,
3993 "triggering reset in reset timer\n");
3994 hclge_reset_event(hdev->pdev, NULL);
3995 }
3996
3997 static void hclge_reset_subtask(struct hclge_dev *hdev)
3998 {
3999 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4000
4001 /* Check if there is any ongoing reset in the hardware. This status can
4002 * be checked from reset_pending. If there is one, we need to wait for
4003 * the hardware to complete the reset.
4004 * a. If we are able to figure out in reasonable time that the hardware
4005 * has fully reset, then we can proceed with the driver and client
4006 * reset.
4007 * b. Otherwise, we can come back later to check this status, so
4008 * reschedule now.
4009 */
4010 hdev->last_reset_time = jiffies;
4011 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4012 if (hdev->reset_type != HNAE3_NONE_RESET)
4013 hclge_reset(hdev);
4014
4015 /* check if we got any *new* reset requests to be honored */
4016 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4017 if (hdev->reset_type != HNAE3_NONE_RESET)
4018 hclge_do_reset(hdev);
4019
4020 hdev->reset_type = HNAE3_NONE_RESET;
4021 }
4022
4023 static void hclge_reset_service_task(struct hclge_dev *hdev)
4024 {
4025 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4026 return;
4027
4028 down(&hdev->reset_sem);
4029 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4030
4031 hclge_reset_subtask(hdev);
4032
4033 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4034 up(&hdev->reset_sem);
4035 }
4036
4037 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4038 {
4039 int i;
4040
4041 /* start from vport 1, since the PF is always alive */
4042 for (i = 1; i < hdev->num_alloc_vport; i++) {
4043 struct hclge_vport *vport = &hdev->vport[i];
4044
4045 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4046 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4047
4048 /* If the VF is not alive, set its MPS back to the default value */
4049 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4050 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4051 }
4052 }
4053
4054 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4055 {
4056 unsigned long delta = round_jiffies_relative(HZ);
4057
4058 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4059 return;
4060
4061 /* Always handle the link updating to make sure the link state is
4062 * updated when it is triggered by mbx.
4063 */
4064 hclge_update_link_status(hdev);
4065 hclge_sync_mac_table(hdev);
4066 hclge_sync_promisc_mode(hdev);
4067
4068 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4069 delta = jiffies - hdev->last_serv_processed;
4070
4071 if (delta < round_jiffies_relative(HZ)) {
4072 delta = round_jiffies_relative(HZ) - delta;
4073 goto out;
4074 }
4075 }
4076
4077 hdev->serv_processed_cnt++;
4078 hclge_update_vport_alive(hdev);
4079
4080 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4081 hdev->last_serv_processed = jiffies;
4082 goto out;
4083 }
4084
4085 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4086 hclge_update_stats_for_all(hdev);
4087
4088 hclge_update_port_info(hdev);
4089 hclge_sync_vlan_filter(hdev);
4090
4091 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4092 hclge_rfs_filter_expire(hdev);
4093
4094 hdev->last_serv_processed = jiffies;
4095
4096 out:
4097 hclge_task_schedule(hdev, delta);
4098 }
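
/* Illustrative helper (assumed, not driver code): it restates the
 * rescheduling rule used above. If the previous service pass finished less
 * than one interval (round_jiffies_relative(HZ)) ago, only the remaining
 * part of that interval is used as the next delay, so the periodic work
 * keeps a roughly fixed cadence instead of drifting.
 */
static unsigned long __maybe_unused
hclge_example_next_delay(unsigned long last_serv_processed)
{
	unsigned long interval = round_jiffies_relative(HZ);
	unsigned long elapsed = jiffies - last_serv_processed;

	if (time_is_after_jiffies(last_serv_processed + HZ) &&
	    elapsed < interval)
		return interval - elapsed;

	return interval;
}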
4099
4100 static void hclge_service_task(struct work_struct *work)
4101 {
4102 struct hclge_dev *hdev =
4103 container_of(work, struct hclge_dev, service_task.work);
4104
4105 hclge_reset_service_task(hdev);
4106 hclge_mailbox_service_task(hdev);
4107 hclge_periodic_service_task(hdev);
4108
4109 /* Handle reset and mbx again in case the periodic task delays the
4110 * handling by calling hclge_task_schedule() in
4111 * hclge_periodic_service_task().
4112 */
4113 hclge_reset_service_task(hdev);
4114 hclge_mailbox_service_task(hdev);
4115 }
4116
4117 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4118 {
4119 /* VF handle has no client */
4120 if (!handle->client)
4121 return container_of(handle, struct hclge_vport, nic);
4122 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4123 return container_of(handle, struct hclge_vport, roce);
4124 else
4125 return container_of(handle, struct hclge_vport, nic);
4126 }
4127
4128 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4129 struct hnae3_vector_info *vector_info)
4130 {
4131 struct hclge_vport *vport = hclge_get_vport(handle);
4132 struct hnae3_vector_info *vector = vector_info;
4133 struct hclge_dev *hdev = vport->back;
4134 int alloc = 0;
4135 int i, j;
4136
4137 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4138 vector_num = min(hdev->num_msi_left, vector_num);
4139
4140 for (j = 0; j < vector_num; j++) {
4141 for (i = 1; i < hdev->num_msi; i++) {
4142 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4143 vector->vector = pci_irq_vector(hdev->pdev, i);
4144 vector->io_addr = hdev->hw.io_base +
4145 HCLGE_VECTOR_REG_BASE +
4146 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4147 vport->vport_id *
4148 HCLGE_VECTOR_VF_OFFSET;
4149 hdev->vector_status[i] = vport->vport_id;
4150 hdev->vector_irq[i] = vector->vector;
4151
4152 vector++;
4153 alloc++;
4154
4155 break;
4156 }
4157 }
4158 }
4159 hdev->num_msi_left -= alloc;
4160 hdev->num_msi_used += alloc;
4161
4162 return alloc;
4163 }
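
/* A hedged usage sketch (the caller below is an assumption, not driver
 * code): a client asks for up to two interrupt vectors and gets back, for
 * each vector granted, the Linux IRQ number and the address of its
 * per-vector control register region. The return value is the number of
 * vectors actually allocated, which may be less than requested when
 * num_msi_left or num_nic_msi runs low.
 */
static int __maybe_unused
hclge_example_request_vectors(struct hnae3_handle *handle)
{
	struct hnae3_vector_info info[2];

	return hclge_get_vector(handle, ARRAY_SIZE(info), info);
}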
4164
4165 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4166 {
4167 int i;
4168
4169 for (i = 0; i < hdev->num_msi; i++)
4170 if (vector == hdev->vector_irq[i])
4171 return i;
4172
4173 return -EINVAL;
4174 }
4175
4176 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4177 {
4178 struct hclge_vport *vport = hclge_get_vport(handle);
4179 struct hclge_dev *hdev = vport->back;
4180 int vector_id;
4181
4182 vector_id = hclge_get_vector_index(hdev, vector);
4183 if (vector_id < 0) {
4184 dev_err(&hdev->pdev->dev,
4185 "Get vector index fail. vector = %d\n", vector);
4186 return vector_id;
4187 }
4188
4189 hclge_free_vector(hdev, vector_id);
4190
4191 return 0;
4192 }
4193
4194 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4195 {
4196 return HCLGE_RSS_KEY_SIZE;
4197 }
4198
4199 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4200 {
4201 return HCLGE_RSS_IND_TBL_SIZE;
4202 }
4203
4204 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4205 const u8 hfunc, const u8 *key)
4206 {
4207 struct hclge_rss_config_cmd *req;
4208 unsigned int key_offset = 0;
4209 struct hclge_desc desc;
4210 int key_counts;
4211 int key_size;
4212 int ret;
4213
4214 key_counts = HCLGE_RSS_KEY_SIZE;
4215 req = (struct hclge_rss_config_cmd *)desc.data;
4216
4217 while (key_counts) {
4218 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4219 false);
4220
4221 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4222 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4223
4224 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4225 memcpy(req->hash_key,
4226 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4227
4228 key_counts -= key_size;
4229 key_offset++;
4230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4231 if (ret) {
4232 dev_err(&hdev->pdev->dev,
4233 "Configure RSS config fail, status = %d\n",
4234 ret);
4235 return ret;
4236 }
4237 }
4238 return 0;
4239 }
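
/* Sketch only (the helper name is hypothetical): the RSS key above is
 * pushed to firmware in slices of HCLGE_RSS_HASH_KEY_NUM bytes, one command
 * descriptor per slice, with the slice index recorded in hash_config via
 * HCLGE_RSS_HASH_KEY_OFFSET_B. The number of descriptors needed is the key
 * length divided by the slice size, rounded up.
 */
static unsigned int __maybe_unused hclge_example_rss_key_desc_num(void)
{
	return DIV_ROUND_UP(HCLGE_RSS_KEY_SIZE, HCLGE_RSS_HASH_KEY_NUM);
}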
4240
4241 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4242 {
4243 struct hclge_rss_indirection_table_cmd *req;
4244 struct hclge_desc desc;
4245 int i, j;
4246 int ret;
4247
4248 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4249
4250 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4251 hclge_cmd_setup_basic_desc
4252 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4253
4254 req->start_table_index =
4255 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4256 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4257
4258 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4259 req->rss_result[j] =
4260 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4261
4262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4263 if (ret) {
4264 dev_err(&hdev->pdev->dev,
4265 "Configure rss indir table fail,status = %d\n",
4266 ret);
4267 return ret;
4268 }
4269 }
4270 return 0;
4271 }
4272
4273 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4274 u16 *tc_size, u16 *tc_offset)
4275 {
4276 struct hclge_rss_tc_mode_cmd *req;
4277 struct hclge_desc desc;
4278 int ret;
4279 int i;
4280
4281 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4282 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4283
4284 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4285 u16 mode = 0;
4286
4287 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4288 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4289 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4290 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4291 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4292
4293 req->rss_tc_mode[i] = cpu_to_le16(mode);
4294 }
4295
4296 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4297 if (ret)
4298 dev_err(&hdev->pdev->dev,
4299 "Configure rss tc mode fail, status = %d\n", ret);
4300
4301 return ret;
4302 }
4303
4304 static void hclge_get_rss_type(struct hclge_vport *vport)
4305 {
4306 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4307 vport->rss_tuple_sets.ipv4_udp_en ||
4308 vport->rss_tuple_sets.ipv4_sctp_en ||
4309 vport->rss_tuple_sets.ipv6_tcp_en ||
4310 vport->rss_tuple_sets.ipv6_udp_en ||
4311 vport->rss_tuple_sets.ipv6_sctp_en)
4312 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4313 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4314 vport->rss_tuple_sets.ipv6_fragment_en)
4315 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4316 else
4317 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4318 }
4319
4320 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4321 {
4322 struct hclge_rss_input_tuple_cmd *req;
4323 struct hclge_desc desc;
4324 int ret;
4325
4326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4327
4328 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4329
4330 /* Get the tuple cfg from pf */
4331 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4332 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4333 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4334 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4335 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4336 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4337 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4338 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4339 hclge_get_rss_type(&hdev->vport[0]);
4340 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4341 if (ret)
4342 dev_err(&hdev->pdev->dev,
4343 "Configure rss input fail, status = %d\n", ret);
4344 return ret;
4345 }
4346
4347 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4348 u8 *key, u8 *hfunc)
4349 {
4350 struct hclge_vport *vport = hclge_get_vport(handle);
4351 int i;
4352
4353 /* Get hash algorithm */
4354 if (hfunc) {
4355 switch (vport->rss_algo) {
4356 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4357 *hfunc = ETH_RSS_HASH_TOP;
4358 break;
4359 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4360 *hfunc = ETH_RSS_HASH_XOR;
4361 break;
4362 default:
4363 *hfunc = ETH_RSS_HASH_UNKNOWN;
4364 break;
4365 }
4366 }
4367
4368 /* Get the RSS Key required by the user */
4369 if (key)
4370 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4371
4372 /* Get indirect table */
4373 if (indir)
4374 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4375 indir[i] = vport->rss_indirection_tbl[i];
4376
4377 return 0;
4378 }
4379
4380 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4381 u8 *hash_algo)
4382 {
4383 switch (hfunc) {
4384 case ETH_RSS_HASH_TOP:
4385 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4386 return 0;
4387 case ETH_RSS_HASH_XOR:
4388 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4389 return 0;
4390 case ETH_RSS_HASH_NO_CHANGE:
4391 *hash_algo = vport->rss_algo;
4392 return 0;
4393 default:
4394 return -EINVAL;
4395 }
4396 }
4397
4398 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4399 const u8 *key, const u8 hfunc)
4400 {
4401 struct hclge_vport *vport = hclge_get_vport(handle);
4402 struct hclge_dev *hdev = vport->back;
4403 u8 hash_algo;
4404 int ret, i;
4405
4406 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4407 if (ret) {
4408 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4409 return ret;
4410 }
4411
4412 /* Set the RSS Hash Key if specified by the user */
4413 if (key) {
4414 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4415 if (ret)
4416 return ret;
4417
4418 /* Update the shadow RSS key with the user-specified key */
4419 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4420 } else {
4421 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4422 vport->rss_hash_key);
4423 if (ret)
4424 return ret;
4425 }
4426 vport->rss_algo = hash_algo;
4427
4428 /* Update the shadow RSS table with user specified qids */
4429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4430 vport->rss_indirection_tbl[i] = indir[i];
4431
4432 /* Update the hardware */
4433 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4434 }
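
/* Usage sketch (illustrative only; the wrapper name is an assumption): a
 * caller that only wants to reshuffle the indirection table can pass a NULL
 * key and ETH_RSS_HASH_NO_CHANGE. In that case the code above re-programs
 * the previously cached key and algorithm and then updates the table.
 */
static int __maybe_unused
hclge_example_update_indir_only(struct hnae3_handle *handle, const u32 *indir)
{
	return hclge_set_rss(handle, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
}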
4435
4436 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4437 {
4438 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4439
4440 if (nfc->data & RXH_L4_B_2_3)
4441 hash_sets |= HCLGE_D_PORT_BIT;
4442 else
4443 hash_sets &= ~HCLGE_D_PORT_BIT;
4444
4445 if (nfc->data & RXH_IP_SRC)
4446 hash_sets |= HCLGE_S_IP_BIT;
4447 else
4448 hash_sets &= ~HCLGE_S_IP_BIT;
4449
4450 if (nfc->data & RXH_IP_DST)
4451 hash_sets |= HCLGE_D_IP_BIT;
4452 else
4453 hash_sets &= ~HCLGE_D_IP_BIT;
4454
4455 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4456 hash_sets |= HCLGE_V_TAG_BIT;
4457
4458 return hash_sets;
4459 }
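
/* Minimal sketch (the function below is illustrative, not driver code): for
 * a TCP flow with all four ethtool hash inputs requested, the translation
 * above yields the source/destination IP and source/destination port bits;
 * only SCTP flows additionally get HCLGE_V_TAG_BIT.
 */
static u8 __maybe_unused hclge_example_tcp4_hash_bits(void)
{
	struct ethtool_rxnfc nfc = {
		.flow_type = TCP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};

	/* HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT */
	return hclge_get_rss_hash_bits(&nfc);
}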
4460
4461 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4462 struct ethtool_rxnfc *nfc)
4463 {
4464 struct hclge_vport *vport = hclge_get_vport(handle);
4465 struct hclge_dev *hdev = vport->back;
4466 struct hclge_rss_input_tuple_cmd *req;
4467 struct hclge_desc desc;
4468 u8 tuple_sets;
4469 int ret;
4470
4471 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4472 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4473 return -EINVAL;
4474
4475 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4476 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4477
4478 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4479 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4480 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4481 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4482 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4483 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4484 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4485 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4486
4487 tuple_sets = hclge_get_rss_hash_bits(nfc);
4488 switch (nfc->flow_type) {
4489 case TCP_V4_FLOW:
4490 req->ipv4_tcp_en = tuple_sets;
4491 break;
4492 case TCP_V6_FLOW:
4493 req->ipv6_tcp_en = tuple_sets;
4494 break;
4495 case UDP_V4_FLOW:
4496 req->ipv4_udp_en = tuple_sets;
4497 break;
4498 case UDP_V6_FLOW:
4499 req->ipv6_udp_en = tuple_sets;
4500 break;
4501 case SCTP_V4_FLOW:
4502 req->ipv4_sctp_en = tuple_sets;
4503 break;
4504 case SCTP_V6_FLOW:
4505 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4506 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4507 return -EINVAL;
4508
4509 req->ipv6_sctp_en = tuple_sets;
4510 break;
4511 case IPV4_FLOW:
4512 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4513 break;
4514 case IPV6_FLOW:
4515 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4516 break;
4517 default:
4518 return -EINVAL;
4519 }
4520
4521 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4522 if (ret) {
4523 dev_err(&hdev->pdev->dev,
4524 "Set rss tuple fail, status = %d\n", ret);
4525 return ret;
4526 }
4527
4528 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4529 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4530 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4531 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4532 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4533 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4534 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4535 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4536 hclge_get_rss_type(vport);
4537 return 0;
4538 }
4539
4540 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4541 struct ethtool_rxnfc *nfc)
4542 {
4543 struct hclge_vport *vport = hclge_get_vport(handle);
4544 u8 tuple_sets;
4545
4546 nfc->data = 0;
4547
4548 switch (nfc->flow_type) {
4549 case TCP_V4_FLOW:
4550 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4551 break;
4552 case UDP_V4_FLOW:
4553 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4554 break;
4555 case TCP_V6_FLOW:
4556 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4557 break;
4558 case UDP_V6_FLOW:
4559 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4560 break;
4561 case SCTP_V4_FLOW:
4562 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4563 break;
4564 case SCTP_V6_FLOW:
4565 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4566 break;
4567 case IPV4_FLOW:
4568 case IPV6_FLOW:
4569 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4570 break;
4571 default:
4572 return -EINVAL;
4573 }
4574
4575 if (!tuple_sets)
4576 return 0;
4577
4578 if (tuple_sets & HCLGE_D_PORT_BIT)
4579 nfc->data |= RXH_L4_B_2_3;
4580 if (tuple_sets & HCLGE_S_PORT_BIT)
4581 nfc->data |= RXH_L4_B_0_1;
4582 if (tuple_sets & HCLGE_D_IP_BIT)
4583 nfc->data |= RXH_IP_DST;
4584 if (tuple_sets & HCLGE_S_IP_BIT)
4585 nfc->data |= RXH_IP_SRC;
4586
4587 return 0;
4588 }
4589
4590 static int hclge_get_tc_size(struct hnae3_handle *handle)
4591 {
4592 struct hclge_vport *vport = hclge_get_vport(handle);
4593 struct hclge_dev *hdev = vport->back;
4594
4595 return hdev->rss_size_max;
4596 }
4597
4598 int hclge_rss_init_hw(struct hclge_dev *hdev)
4599 {
4600 struct hclge_vport *vport = hdev->vport;
4601 u8 *rss_indir = vport[0].rss_indirection_tbl;
4602 u16 rss_size = vport[0].alloc_rss_size;
4603 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4604 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4605 u8 *key = vport[0].rss_hash_key;
4606 u8 hfunc = vport[0].rss_algo;
4607 u16 tc_valid[HCLGE_MAX_TC_NUM];
4608 u16 roundup_size;
4609 unsigned int i;
4610 int ret;
4611
4612 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4613 if (ret)
4614 return ret;
4615
4616 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4617 if (ret)
4618 return ret;
4619
4620 ret = hclge_set_rss_input_tuple(hdev);
4621 if (ret)
4622 return ret;
4623
4624 /* Each TC has the same queue size, and the tc_size set to hardware is
4625 * the log2 of the roundup power of two of rss_size; the actual queue
4626 * size is limited by the indirection table.
4627 */
4628 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4629 dev_err(&hdev->pdev->dev,
4630 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4631 rss_size);
4632 return -EINVAL;
4633 }
4634
4635 roundup_size = roundup_pow_of_two(rss_size);
4636 roundup_size = ilog2(roundup_size);
4637
4638 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4639 tc_valid[i] = 0;
4640
4641 if (!(hdev->hw_tc_map & BIT(i)))
4642 continue;
4643
4644 tc_valid[i] = 1;
4645 tc_size[i] = roundup_size;
4646 tc_offset[i] = rss_size * i;
4647 }
4648
4649 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4650 }
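
/* Worked sketch (hypothetical helper; the numbers are for illustration
 * only): the per-TC size programmed into hardware is the log2 of rss_size
 * rounded up to a power of two. For example, an rss_size of 24 rounds up to
 * 32, so the tc_size field becomes 5, while the queue range actually used
 * is still bounded by the indirection table entries.
 */
static u16 __maybe_unused hclge_example_tc_size(u16 rss_size)
{
	return ilog2(roundup_pow_of_two(rss_size));
}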
4651
4652 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4653 {
4654 struct hclge_vport *vport = hdev->vport;
4655 int i, j;
4656
4657 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4658 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4659 vport[j].rss_indirection_tbl[i] =
4660 i % vport[j].alloc_rss_size;
4661 }
4662 }
4663
4664 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4665 {
4666 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4667 struct hclge_vport *vport = hdev->vport;
4668
4669 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4670 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4671
4672 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4673 vport[i].rss_tuple_sets.ipv4_tcp_en =
4674 HCLGE_RSS_INPUT_TUPLE_OTHER;
4675 vport[i].rss_tuple_sets.ipv4_udp_en =
4676 HCLGE_RSS_INPUT_TUPLE_OTHER;
4677 vport[i].rss_tuple_sets.ipv4_sctp_en =
4678 HCLGE_RSS_INPUT_TUPLE_SCTP;
4679 vport[i].rss_tuple_sets.ipv4_fragment_en =
4680 HCLGE_RSS_INPUT_TUPLE_OTHER;
4681 vport[i].rss_tuple_sets.ipv6_tcp_en =
4682 HCLGE_RSS_INPUT_TUPLE_OTHER;
4683 vport[i].rss_tuple_sets.ipv6_udp_en =
4684 HCLGE_RSS_INPUT_TUPLE_OTHER;
4685 vport[i].rss_tuple_sets.ipv6_sctp_en =
4686 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4687 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4688 HCLGE_RSS_INPUT_TUPLE_SCTP;
4689 vport[i].rss_tuple_sets.ipv6_fragment_en =
4690 HCLGE_RSS_INPUT_TUPLE_OTHER;
4691
4692 vport[i].rss_algo = rss_algo;
4693
4694 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4695 HCLGE_RSS_KEY_SIZE);
4696 }
4697
4698 hclge_rss_indir_init_cfg(hdev);
4699 }
4700
4701 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4702 int vector_id, bool en,
4703 struct hnae3_ring_chain_node *ring_chain)
4704 {
4705 struct hclge_dev *hdev = vport->back;
4706 struct hnae3_ring_chain_node *node;
4707 struct hclge_desc desc;
4708 struct hclge_ctrl_vector_chain_cmd *req =
4709 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4710 enum hclge_cmd_status status;
4711 enum hclge_opcode_type op;
4712 u16 tqp_type_and_id;
4713 int i;
4714
4715 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4716 hclge_cmd_setup_basic_desc(&desc, op, false);
4717 req->int_vector_id = vector_id;
4718
4719 i = 0;
4720 for (node = ring_chain; node; node = node->next) {
4721 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4722 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4723 HCLGE_INT_TYPE_S,
4724 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4725 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4726 HCLGE_TQP_ID_S, node->tqp_index);
4727 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4728 HCLGE_INT_GL_IDX_S,
4729 hnae3_get_field(node->int_gl_idx,
4730 HNAE3_RING_GL_IDX_M,
4731 HNAE3_RING_GL_IDX_S));
4732 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4733 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4734 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4735 req->vfid = vport->vport_id;
4736
4737 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4738 if (status) {
4739 dev_err(&hdev->pdev->dev,
4740 "Map TQP fail, status is %d.\n",
4741 status);
4742 return -EIO;
4743 }
4744 i = 0;
4745
4746 hclge_cmd_setup_basic_desc(&desc,
4747 op,
4748 false);
4749 req->int_vector_id = vector_id;
4750 }
4751 }
4752
4753 if (i > 0) {
4754 req->int_cause_num = i;
4755 req->vfid = vport->vport_id;
4756 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4757 if (status) {
4758 dev_err(&hdev->pdev->dev,
4759 "Map TQP fail, status is %d.\n", status);
4760 return -EIO;
4761 }
4762 }
4763
4764 return 0;
4765 }
4766
4767 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4768 struct hnae3_ring_chain_node *ring_chain)
4769 {
4770 struct hclge_vport *vport = hclge_get_vport(handle);
4771 struct hclge_dev *hdev = vport->back;
4772 int vector_id;
4773
4774 vector_id = hclge_get_vector_index(hdev, vector);
4775 if (vector_id < 0) {
4776 dev_err(&hdev->pdev->dev,
4777 "failed to get vector index. vector=%d\n", vector);
4778 return vector_id;
4779 }
4780
4781 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4782 }
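
/* Hedged sketch (the two-node chain below is made up for illustration; the
 * HNAE3_RING_TYPE_* and HNAE3_RING_GL_* values are assumed to be the ones
 * defined in hnae3.h): a caller builds a linked hnae3_ring_chain_node list
 * describing its TX/RX rings and the GL index each ring uses, then maps the
 * whole chain to one vector. hclge_bind_ring_with_vector() packs up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries into each command descriptor
 * and sends as many descriptors as the chain requires.
 */
static int __maybe_unused
hclge_example_map_two_rings(struct hnae3_handle *handle, int vector)
{
	struct hnae3_ring_chain_node rx = { .tqp_index = 0 };
	struct hnae3_ring_chain_node tx = { .tqp_index = 0, .next = &rx };

	hnae3_set_bit(tx.flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_TX);
	hnae3_set_bit(rx.flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX);
	hnae3_set_field(tx.int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
	hnae3_set_field(rx.int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

	return hclge_map_ring_to_vector(handle, vector, &tx);
}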
4783
4784 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4785 struct hnae3_ring_chain_node *ring_chain)
4786 {
4787 struct hclge_vport *vport = hclge_get_vport(handle);
4788 struct hclge_dev *hdev = vport->back;
4789 int vector_id, ret;
4790
4791 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4792 return 0;
4793
4794 vector_id = hclge_get_vector_index(hdev, vector);
4795 if (vector_id < 0) {
4796 dev_err(&handle->pdev->dev,
4797 "Get vector index fail. ret =%d\n", vector_id);
4798 return vector_id;
4799 }
4800
4801 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4802 if (ret)
4803 dev_err(&handle->pdev->dev,
4804 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4805 vector_id, ret);
4806
4807 return ret;
4808 }
4809
4810 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4811 struct hclge_promisc_param *param)
4812 {
4813 struct hclge_promisc_cfg_cmd *req;
4814 struct hclge_desc desc;
4815 int ret;
4816
4817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4818
4819 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4820 req->vf_id = param->vf_id;
4821
4822 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4823 * pdev revision(0x20); newer revisions support them. Setting these
4824 * two fields does not cause an error when the driver sends the
4825 * command to the firmware on revision(0x20).
4826 */
4827 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4828 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4829
4830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4831 if (ret)
4832 dev_err(&hdev->pdev->dev,
4833 "failed to set vport %d promisc mode, ret = %d.\n",
4834 param->vf_id, ret);
4835
4836 return ret;
4837 }
4838
4839 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4840 bool en_uc, bool en_mc, bool en_bc,
4841 int vport_id)
4842 {
4843 if (!param)
4844 return;
4845
4846 memset(param, 0, sizeof(struct hclge_promisc_param));
4847 if (en_uc)
4848 param->enable = HCLGE_PROMISC_EN_UC;
4849 if (en_mc)
4850 param->enable |= HCLGE_PROMISC_EN_MC;
4851 if (en_bc)
4852 param->enable |= HCLGE_PROMISC_EN_BC;
4853 param->vf_id = vport_id;
4854 }
4855
4856 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4857 bool en_mc_pmc, bool en_bc_pmc)
4858 {
4859 struct hclge_dev *hdev = vport->back;
4860 struct hclge_promisc_param param;
4861
4862 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4863 vport->vport_id);
4864 return hclge_cmd_set_promisc_mode(hdev, &param);
4865 }
4866
4867 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4868 bool en_mc_pmc)
4869 {
4870 struct hclge_vport *vport = hclge_get_vport(handle);
4871 struct hclge_dev *hdev = vport->back;
4872 bool en_bc_pmc = true;
4873
4874 /* For devices whose version is below V2, if broadcast promisc is
4875 * enabled, the vlan filter is always bypassed. So broadcast promisc
4876 * should be disabled until the user enables promisc mode.
4877 */
4878 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4879 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4880
4881 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4882 en_bc_pmc);
4883 }
4884
4885 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4886 {
4887 struct hclge_vport *vport = hclge_get_vport(handle);
4888 struct hclge_dev *hdev = vport->back;
4889
4890 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4891 }
4892
4893 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4894 {
4895 struct hclge_get_fd_mode_cmd *req;
4896 struct hclge_desc desc;
4897 int ret;
4898
4899 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4900
4901 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4902
4903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904 if (ret) {
4905 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4906 return ret;
4907 }
4908
4909 *fd_mode = req->mode;
4910
4911 return ret;
4912 }
4913
4914 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4915 u32 *stage1_entry_num,
4916 u32 *stage2_entry_num,
4917 u16 *stage1_counter_num,
4918 u16 *stage2_counter_num)
4919 {
4920 struct hclge_get_fd_allocation_cmd *req;
4921 struct hclge_desc desc;
4922 int ret;
4923
4924 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4925
4926 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4927
4928 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4929 if (ret) {
4930 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4931 ret);
4932 return ret;
4933 }
4934
4935 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4936 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4937 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4938 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4939
4940 return ret;
4941 }
4942
4943 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4944 enum HCLGE_FD_STAGE stage_num)
4945 {
4946 struct hclge_set_fd_key_config_cmd *req;
4947 struct hclge_fd_key_cfg *stage;
4948 struct hclge_desc desc;
4949 int ret;
4950
4951 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4952
4953 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4954 stage = &hdev->fd_cfg.key_cfg[stage_num];
4955 req->stage = stage_num;
4956 req->key_select = stage->key_sel;
4957 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4958 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4959 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4960 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4961 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4962 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4963
4964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965 if (ret)
4966 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4967
4968 return ret;
4969 }
4970
4971 static int hclge_init_fd_config(struct hclge_dev *hdev)
4972 {
4973 #define LOW_2_WORDS 0x03
4974 struct hclge_fd_key_cfg *key_cfg;
4975 int ret;
4976
4977 if (!hnae3_dev_fd_supported(hdev))
4978 return 0;
4979
4980 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4981 if (ret)
4982 return ret;
4983
4984 switch (hdev->fd_cfg.fd_mode) {
4985 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4986 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4987 break;
4988 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4989 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4990 break;
4991 default:
4992 dev_err(&hdev->pdev->dev,
4993 "Unsupported flow director mode %u\n",
4994 hdev->fd_cfg.fd_mode);
4995 return -EOPNOTSUPP;
4996 }
4997
4998 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4999 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5000 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5001 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5002 key_cfg->outer_sipv6_word_en = 0;
5003 key_cfg->outer_dipv6_word_en = 0;
5004
5005 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5006 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5007 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5008 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5009
5010 /* If the max 400-bit key is used, MAC address tuples can also be supported */
5011 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5012 key_cfg->tuple_active |=
5013 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5014
5015 /* roce_type is used to filter roce frames
5016 * dst_vport is used to specify the rule
5017 */
5018 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5019
5020 ret = hclge_get_fd_allocation(hdev,
5021 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5022 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5023 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5024 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5025 if (ret)
5026 return ret;
5027
5028 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5029 }
5030
5031 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5032 int loc, u8 *key, bool is_add)
5033 {
5034 struct hclge_fd_tcam_config_1_cmd *req1;
5035 struct hclge_fd_tcam_config_2_cmd *req2;
5036 struct hclge_fd_tcam_config_3_cmd *req3;
5037 struct hclge_desc desc[3];
5038 int ret;
5039
5040 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5041 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5042 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5043 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5044 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5045
5046 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5047 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5048 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5049
5050 req1->stage = stage;
5051 req1->xy_sel = sel_x ? 1 : 0;
5052 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5053 req1->index = cpu_to_le32(loc);
5054 req1->entry_vld = sel_x ? is_add : 0;
5055
5056 if (key) {
5057 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5058 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5059 sizeof(req2->tcam_data));
5060 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5061 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5062 }
5063
5064 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5065 if (ret)
5066 dev_err(&hdev->pdev->dev,
5067 "config tcam key fail, ret=%d\n",
5068 ret);
5069
5070 return ret;
5071 }
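
/* Sketch (the helper name is illustrative): one flow director TCAM entry is
 * written with three chained descriptors (HCLGE_CMD_FLAG_NEXT), and the key
 * bytes are split across the three tcam_data arrays in order. The total
 * number of key bytes a single write can carry is therefore the sum of the
 * three array sizes.
 */
static size_t __maybe_unused hclge_example_tcam_key_capacity(void)
{
	return sizeof_field(struct hclge_fd_tcam_config_1_cmd, tcam_data) +
	       sizeof_field(struct hclge_fd_tcam_config_2_cmd, tcam_data) +
	       sizeof_field(struct hclge_fd_tcam_config_3_cmd, tcam_data);
}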
5072
5073 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5074 struct hclge_fd_ad_data *action)
5075 {
5076 struct hclge_fd_ad_config_cmd *req;
5077 struct hclge_desc desc;
5078 u64 ad_data = 0;
5079 int ret;
5080
5081 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5082
5083 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5084 req->index = cpu_to_le32(loc);
5085 req->stage = stage;
5086
5087 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5088 action->write_rule_id_to_bd);
5089 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5090 action->rule_id);
5091 ad_data <<= 32;
5092 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5093 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5094 action->forward_to_direct_queue);
5095 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5096 action->queue_id);
5097 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5098 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5099 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5100 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5101 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5102 action->counter_id);
5103
5104 req->ad_data = cpu_to_le64(ad_data);
5105 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5106 if (ret)
5107 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5108
5109 return ret;
5110 }
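
/* Usage sketch (the particular values are made up): program the action side
 * of flow director entry 'loc' so that matching packets are forwarded to a
 * direct queue and tagged with the rule id. Only struct hclge_fd_ad_data is
 * filled here; the 64-bit packing (rule-id fields in the upper half after
 * the "ad_data <<= 32" shift, action fields in the lower half) stays inside
 * hclge_fd_ad_config() above.
 */
static int __maybe_unused
hclge_example_fd_forward_action(struct hclge_dev *hdev, int loc, u16 queue_id)
{
	struct hclge_fd_ad_data action = {
		.write_rule_id_to_bd = true,
		.rule_id = loc,
		.forward_to_direct_queue = true,
		.queue_id = queue_id,
	};

	return hclge_fd_ad_config(hdev, HCLGE_FD_STAGE_1, loc, &action);
}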
5111
5112 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5113 struct hclge_fd_rule *rule)
5114 {
5115 u16 tmp_x_s, tmp_y_s;
5116 u32 tmp_x_l, tmp_y_l;
5117 int i;
5118
5119 if (rule->unused_tuple & tuple_bit)
5120 return true;
5121
5122 switch (tuple_bit) {
5123 case BIT(INNER_DST_MAC):
5124 for (i = 0; i < ETH_ALEN; i++) {
5125 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5126 rule->tuples_mask.dst_mac[i]);
5127 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5128 rule->tuples_mask.dst_mac[i]);
5129 }
5130
5131 return true;
5132 case BIT(INNER_SRC_MAC):
5133 for (i = 0; i < ETH_ALEN; i++) {
5134 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5135 rule->tuples_mask.src_mac[i]);
5136 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5137 rule->tuples_mask.src_mac[i]);
5138 }
5139
5140 return true;
5141 case BIT(INNER_VLAN_TAG_FST):
5142 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5143 rule->tuples_mask.vlan_tag1);
5144 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5145 rule->tuples_mask.vlan_tag1);
5146 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5147 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5148
5149 return true;
5150 case BIT(INNER_ETH_TYPE):
5151 calc_x(tmp_x_s, rule->tuples.ether_proto,
5152 rule->tuples_mask.ether_proto);
5153 calc_y(tmp_y_s, rule->tuples.ether_proto,
5154 rule->tuples_mask.ether_proto);
5155 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5156 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5157
5158 return true;
5159 case BIT(INNER_IP_TOS):
5160 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5161 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5162
5163 return true;
5164 case BIT(INNER_IP_PROTO):
5165 calc_x(*key_x, rule->tuples.ip_proto,
5166 rule->tuples_mask.ip_proto);
5167 calc_y(*key_y, rule->tuples.ip_proto,
5168 rule->tuples_mask.ip_proto);
5169
5170 return true;
5171 case BIT(INNER_SRC_IP):
5172 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5173 rule->tuples_mask.src_ip[IPV4_INDEX]);
5174 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5175 rule->tuples_mask.src_ip[IPV4_INDEX]);
5176 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5177 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5178
5179 return true;
5180 case BIT(INNER_DST_IP):
5181 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5182 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5183 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5184 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5185 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5186 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5187
5188 return true;
5189 case BIT(INNER_SRC_PORT):
5190 calc_x(tmp_x_s, rule->tuples.src_port,
5191 rule->tuples_mask.src_port);
5192 calc_y(tmp_y_s, rule->tuples.src_port,
5193 rule->tuples_mask.src_port);
5194 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5195 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5196
5197 return true;
5198 case BIT(INNER_DST_PORT):
5199 calc_x(tmp_x_s, rule->tuples.dst_port,
5200 rule->tuples_mask.dst_port);
5201 calc_y(tmp_y_s, rule->tuples.dst_port,
5202 rule->tuples_mask.dst_port);
5203 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5204 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5205
5206 return true;
5207 default:
5208 return false;
5209 }
5210 }
5211
5212 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5213 u8 vf_id, u8 network_port_id)
5214 {
5215 u32 port_number = 0;
5216
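	/* Host ports are identified by PF id and VF id, while network ports are
	 * identified by the network port id; the port type bit tells them apart.
	 */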
5217 if (port_type == HOST_PORT) {
5218 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5219 pf_id);
5220 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5221 vf_id);
5222 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5223 } else {
5224 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5225 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5226 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5227 }
5228
5229 return port_number;
5230 }
5231
5232 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5233 __le32 *key_x, __le32 *key_y,
5234 struct hclge_fd_rule *rule)
5235 {
5236 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5237 u8 cur_pos = 0, tuple_size, shift_bits;
5238 unsigned int i;
5239
5240 for (i = 0; i < MAX_META_DATA; i++) {
5241 tuple_size = meta_data_key_info[i].key_length;
5242 tuple_bit = key_cfg->meta_data_active & BIT(i);
5243
5244 switch (tuple_bit) {
5245 case BIT(ROCE_TYPE):
5246 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5247 cur_pos += tuple_size;
5248 break;
5249 case BIT(DST_VPORT):
5250 port_number = hclge_get_port_number(HOST_PORT, 0,
5251 rule->vf_id, 0);
5252 hnae3_set_field(meta_data,
5253 GENMASK(cur_pos + tuple_size, cur_pos),
5254 cur_pos, port_number);
5255 cur_pos += tuple_size;
5256 break;
5257 default:
5258 break;
5259 }
5260 }
5261
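	/* The meta data key occupies the high-order bits of the 32-bit word, so
	 * shift the result left by the number of unused bits before writing it.
	 */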
5262 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5263 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5264 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5265
5266 *key_x = cpu_to_le32(tmp_x << shift_bits);
5267 *key_y = cpu_to_le32(tmp_y << shift_bits);
5268 }
5269
5270 /* A complete key is made up of a meta data key and a tuple key.
5271  * The meta data key is stored at the MSB region, and the tuple key is
5272  * stored at the LSB region; unused bits are filled with 0.
5273 */
5274 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5275 struct hclge_fd_rule *rule)
5276 {
5277 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5278 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5279 u8 *cur_key_x, *cur_key_y;
5280 u8 meta_data_region;
5281 u8 tuple_size;
5282 int ret;
5283 u32 i;
5284
5285 memset(key_x, 0, sizeof(key_x));
5286 memset(key_y, 0, sizeof(key_y));
5287 cur_key_x = key_x;
5288 cur_key_y = key_y;
5289
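	/* Walk all tuples and fill the key buffers: space is reserved (pointers
	 * advance) for every tuple enabled in the key config, even when the rule
	 * leaves it unused; tuples disabled in hardware are skipped entirely.
	 */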
5290 	for (i = 0; i < MAX_TUPLE; i++) {
5291 bool tuple_valid;
5292 u32 check_tuple;
5293
5294 tuple_size = tuple_key_info[i].key_length / 8;
5295 check_tuple = key_cfg->tuple_active & BIT(i);
5296
5297 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5298 cur_key_y, rule);
5299 if (tuple_valid) {
5300 cur_key_x += tuple_size;
5301 cur_key_y += tuple_size;
5302 }
5303 }
5304
5305 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5306 MAX_META_DATA_LENGTH / 8;
5307
5308 hclge_fd_convert_meta_data(key_cfg,
5309 (__le32 *)(key_x + meta_data_region),
5310 (__le32 *)(key_y + meta_data_region),
5311 rule);
5312
5313 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5314 true);
5315 if (ret) {
5316 dev_err(&hdev->pdev->dev,
5317 "fd key_y config fail, loc=%u, ret=%d\n",
5318 			rule->location, ret);
5319 return ret;
5320 }
5321
5322 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5323 true);
5324 if (ret)
5325 dev_err(&hdev->pdev->dev,
5326 "fd key_x config fail, loc=%u, ret=%d\n",
5327 			rule->location, ret);
5328 return ret;
5329 }
5330
5331 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5332 struct hclge_fd_rule *rule)
5333 {
5334 struct hclge_fd_ad_data ad_data;
5335
5336 ad_data.ad_id = rule->location;
5337
5338 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5339 ad_data.drop_packet = true;
5340 ad_data.forward_to_direct_queue = false;
5341 ad_data.queue_id = 0;
5342 } else {
5343 ad_data.drop_packet = false;
5344 ad_data.forward_to_direct_queue = true;
5345 ad_data.queue_id = rule->queue_id;
5346 }
5347
5348 ad_data.use_counter = false;
5349 ad_data.counter_id = 0;
5350
5351 ad_data.use_next_stage = false;
5352 ad_data.next_input_key = 0;
5353
5354 ad_data.write_rule_id_to_bd = true;
5355 ad_data.rule_id = rule->location;
5356
5357 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5358 }
5359
5360 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5361 u32 *unused_tuple)
5362 {
5363 if (!spec || !unused_tuple)
5364 return -EINVAL;
5365
5366 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5367
5368 if (!spec->ip4src)
5369 *unused_tuple |= BIT(INNER_SRC_IP);
5370
5371 if (!spec->ip4dst)
5372 *unused_tuple |= BIT(INNER_DST_IP);
5373
5374 if (!spec->psrc)
5375 *unused_tuple |= BIT(INNER_SRC_PORT);
5376
5377 if (!spec->pdst)
5378 *unused_tuple |= BIT(INNER_DST_PORT);
5379
5380 if (!spec->tos)
5381 *unused_tuple |= BIT(INNER_IP_TOS);
5382
5383 return 0;
5384 }
5385
5386 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5387 u32 *unused_tuple)
5388 {
5389 if (!spec || !unused_tuple)
5390 return -EINVAL;
5391
5392 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5393 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5394
5395 if (!spec->ip4src)
5396 *unused_tuple |= BIT(INNER_SRC_IP);
5397
5398 if (!spec->ip4dst)
5399 *unused_tuple |= BIT(INNER_DST_IP);
5400
5401 if (!spec->tos)
5402 *unused_tuple |= BIT(INNER_IP_TOS);
5403
5404 if (!spec->proto)
5405 *unused_tuple |= BIT(INNER_IP_PROTO);
5406
5407 if (spec->l4_4_bytes)
5408 return -EOPNOTSUPP;
5409
5410 if (spec->ip_ver != ETH_RX_NFC_IP4)
5411 return -EOPNOTSUPP;
5412
5413 return 0;
5414 }
5415
5416 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5417 u32 *unused_tuple)
5418 {
5419 if (!spec || !unused_tuple)
5420 return -EINVAL;
5421
5422 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5423 BIT(INNER_IP_TOS);
5424
5425 	/* check whether the src/dst ip address is used */
5426 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5427 !spec->ip6src[2] && !spec->ip6src[3])
5428 *unused_tuple |= BIT(INNER_SRC_IP);
5429
5430 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5431 !spec->ip6dst[2] && !spec->ip6dst[3])
5432 *unused_tuple |= BIT(INNER_DST_IP);
5433
5434 if (!spec->psrc)
5435 *unused_tuple |= BIT(INNER_SRC_PORT);
5436
5437 if (!spec->pdst)
5438 *unused_tuple |= BIT(INNER_DST_PORT);
5439
5440 if (spec->tclass)
5441 return -EOPNOTSUPP;
5442
5443 return 0;
5444 }
5445
5446 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5447 u32 *unused_tuple)
5448 {
5449 if (!spec || !unused_tuple)
5450 return -EINVAL;
5451
5452 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5453 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5454
5455 	/* check whether the src/dst ip address is used */
5456 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5457 !spec->ip6src[2] && !spec->ip6src[3])
5458 *unused_tuple |= BIT(INNER_SRC_IP);
5459
5460 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5461 !spec->ip6dst[2] && !spec->ip6dst[3])
5462 *unused_tuple |= BIT(INNER_DST_IP);
5463
5464 if (!spec->l4_proto)
5465 *unused_tuple |= BIT(INNER_IP_PROTO);
5466
5467 if (spec->tclass)
5468 return -EOPNOTSUPP;
5469
5470 if (spec->l4_4_bytes)
5471 return -EOPNOTSUPP;
5472
5473 return 0;
5474 }
5475
5476 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5477 {
5478 if (!spec || !unused_tuple)
5479 return -EINVAL;
5480
5481 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5482 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5483 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5484
5485 if (is_zero_ether_addr(spec->h_source))
5486 *unused_tuple |= BIT(INNER_SRC_MAC);
5487
5488 if (is_zero_ether_addr(spec->h_dest))
5489 *unused_tuple |= BIT(INNER_DST_MAC);
5490
5491 if (!spec->h_proto)
5492 *unused_tuple |= BIT(INNER_ETH_TYPE);
5493
5494 return 0;
5495 }
5496
5497 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5498 struct ethtool_rx_flow_spec *fs,
5499 u32 *unused_tuple)
5500 {
5501 if (fs->flow_type & FLOW_EXT) {
5502 if (fs->h_ext.vlan_etype) {
5503 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5504 return -EOPNOTSUPP;
5505 }
5506
5507 if (!fs->h_ext.vlan_tci)
5508 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5509
5510 if (fs->m_ext.vlan_tci &&
5511 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5512 dev_err(&hdev->pdev->dev,
5513 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5514 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5515 return -EINVAL;
5516 }
5517 } else {
5518 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5519 }
5520
5521 if (fs->flow_type & FLOW_MAC_EXT) {
5522 if (hdev->fd_cfg.fd_mode !=
5523 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5524 dev_err(&hdev->pdev->dev,
5525 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5526 return -EOPNOTSUPP;
5527 }
5528
5529 if (is_zero_ether_addr(fs->h_ext.h_dest))
5530 *unused_tuple |= BIT(INNER_DST_MAC);
5531 else
5532 *unused_tuple &= ~BIT(INNER_DST_MAC);
5533 }
5534
5535 return 0;
5536 }
5537
5538 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5539 struct ethtool_rx_flow_spec *fs,
5540 u32 *unused_tuple)
5541 {
5542 u32 flow_type;
5543 int ret;
5544
5545 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5546 dev_err(&hdev->pdev->dev,
5547 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5548 fs->location,
5549 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5550 return -EINVAL;
5551 }
5552
5553 if ((fs->flow_type & FLOW_EXT) &&
5554 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5555 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5556 return -EOPNOTSUPP;
5557 }
5558
5559 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5560 switch (flow_type) {
5561 case SCTP_V4_FLOW:
5562 case TCP_V4_FLOW:
5563 case UDP_V4_FLOW:
5564 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5565 unused_tuple);
5566 break;
5567 case IP_USER_FLOW:
5568 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5569 unused_tuple);
5570 break;
5571 case SCTP_V6_FLOW:
5572 case TCP_V6_FLOW:
5573 case UDP_V6_FLOW:
5574 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5575 unused_tuple);
5576 break;
5577 case IPV6_USER_FLOW:
5578 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5579 unused_tuple);
5580 break;
5581 case ETHER_FLOW:
5582 if (hdev->fd_cfg.fd_mode !=
5583 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5584 dev_err(&hdev->pdev->dev,
5585 "ETHER_FLOW is not supported in current fd mode!\n");
5586 return -EOPNOTSUPP;
5587 }
5588
5589 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5590 unused_tuple);
5591 break;
5592 default:
5593 dev_err(&hdev->pdev->dev,
5594 "unsupported protocol type, protocol type = %#x\n",
5595 flow_type);
5596 return -EOPNOTSUPP;
5597 }
5598
5599 if (ret) {
5600 dev_err(&hdev->pdev->dev,
5601 "failed to check flow union tuple, ret = %d\n",
5602 ret);
5603 return ret;
5604 }
5605
5606 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5607 }
5608
5609 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5610 {
5611 struct hclge_fd_rule *rule = NULL;
5612 struct hlist_node *node2;
5613
5614 spin_lock_bh(&hdev->fd_rule_lock);
5615 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5616 if (rule->location >= location)
5617 break;
5618 }
5619
5620 spin_unlock_bh(&hdev->fd_rule_lock);
5621
5622 return rule && rule->location == location;
5623 }
5624
5625 /* must be called with fd_rule_lock held */
5626 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5627 struct hclge_fd_rule *new_rule,
5628 u16 location,
5629 bool is_add)
5630 {
5631 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5632 struct hlist_node *node2;
5633
5634 if (is_add && !new_rule)
5635 return -EINVAL;
5636
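	/* The rule list is sorted by location: find the first rule at or after
	 * 'location' and remember its predecessor as the insertion point.
	 */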
5637 hlist_for_each_entry_safe(rule, node2,
5638 &hdev->fd_rule_list, rule_node) {
5639 if (rule->location >= location)
5640 break;
5641 parent = rule;
5642 }
5643
5644 if (rule && rule->location == location) {
5645 hlist_del(&rule->rule_node);
5646 kfree(rule);
5647 hdev->hclge_fd_rule_num--;
5648
5649 if (!is_add) {
5650 if (!hdev->hclge_fd_rule_num)
5651 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5652 clear_bit(location, hdev->fd_bmap);
5653
5654 return 0;
5655 }
5656 } else if (!is_add) {
5657 dev_err(&hdev->pdev->dev,
5658 			"delete fail, rule %u does not exist\n",
5659 location);
5660 return -EINVAL;
5661 }
5662
5663 INIT_HLIST_NODE(&new_rule->rule_node);
5664
5665 if (parent)
5666 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5667 else
5668 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5669
5670 set_bit(location, hdev->fd_bmap);
5671 hdev->hclge_fd_rule_num++;
5672 hdev->fd_active_type = new_rule->rule_type;
5673
5674 return 0;
5675 }
5676
5677 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5678 struct ethtool_rx_flow_spec *fs,
5679 struct hclge_fd_rule *rule)
5680 {
5681 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5682
5683 switch (flow_type) {
5684 case SCTP_V4_FLOW:
5685 case TCP_V4_FLOW:
5686 case UDP_V4_FLOW:
5687 rule->tuples.src_ip[IPV4_INDEX] =
5688 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5689 rule->tuples_mask.src_ip[IPV4_INDEX] =
5690 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5691
5692 rule->tuples.dst_ip[IPV4_INDEX] =
5693 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5694 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5695 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5696
5697 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5698 rule->tuples_mask.src_port =
5699 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5700
5701 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5702 rule->tuples_mask.dst_port =
5703 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5704
5705 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5706 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5707
5708 rule->tuples.ether_proto = ETH_P_IP;
5709 rule->tuples_mask.ether_proto = 0xFFFF;
5710
5711 break;
5712 case IP_USER_FLOW:
5713 rule->tuples.src_ip[IPV4_INDEX] =
5714 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5715 rule->tuples_mask.src_ip[IPV4_INDEX] =
5716 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5717
5718 rule->tuples.dst_ip[IPV4_INDEX] =
5719 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5720 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5721 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5722
5723 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5724 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5725
5726 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5727 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5728
5729 rule->tuples.ether_proto = ETH_P_IP;
5730 rule->tuples_mask.ether_proto = 0xFFFF;
5731
5732 break;
5733 case SCTP_V6_FLOW:
5734 case TCP_V6_FLOW:
5735 case UDP_V6_FLOW:
5736 be32_to_cpu_array(rule->tuples.src_ip,
5737 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5738 be32_to_cpu_array(rule->tuples_mask.src_ip,
5739 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5740
5741 be32_to_cpu_array(rule->tuples.dst_ip,
5742 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5743 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5744 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5745
5746 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5747 rule->tuples_mask.src_port =
5748 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5749
5750 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5751 rule->tuples_mask.dst_port =
5752 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5753
5754 rule->tuples.ether_proto = ETH_P_IPV6;
5755 rule->tuples_mask.ether_proto = 0xFFFF;
5756
5757 break;
5758 case IPV6_USER_FLOW:
5759 be32_to_cpu_array(rule->tuples.src_ip,
5760 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5761 be32_to_cpu_array(rule->tuples_mask.src_ip,
5762 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5763
5764 be32_to_cpu_array(rule->tuples.dst_ip,
5765 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5766 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5767 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5768
5769 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5770 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5771
5772 rule->tuples.ether_proto = ETH_P_IPV6;
5773 rule->tuples_mask.ether_proto = 0xFFFF;
5774
5775 break;
5776 case ETHER_FLOW:
5777 ether_addr_copy(rule->tuples.src_mac,
5778 fs->h_u.ether_spec.h_source);
5779 ether_addr_copy(rule->tuples_mask.src_mac,
5780 fs->m_u.ether_spec.h_source);
5781
5782 ether_addr_copy(rule->tuples.dst_mac,
5783 fs->h_u.ether_spec.h_dest);
5784 ether_addr_copy(rule->tuples_mask.dst_mac,
5785 fs->m_u.ether_spec.h_dest);
5786
5787 rule->tuples.ether_proto =
5788 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5789 rule->tuples_mask.ether_proto =
5790 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5791
5792 break;
5793 default:
5794 return -EOPNOTSUPP;
5795 }
5796
5797 switch (flow_type) {
5798 case SCTP_V4_FLOW:
5799 case SCTP_V6_FLOW:
5800 rule->tuples.ip_proto = IPPROTO_SCTP;
5801 rule->tuples_mask.ip_proto = 0xFF;
5802 break;
5803 case TCP_V4_FLOW:
5804 case TCP_V6_FLOW:
5805 rule->tuples.ip_proto = IPPROTO_TCP;
5806 rule->tuples_mask.ip_proto = 0xFF;
5807 break;
5808 case UDP_V4_FLOW:
5809 case UDP_V6_FLOW:
5810 rule->tuples.ip_proto = IPPROTO_UDP;
5811 rule->tuples_mask.ip_proto = 0xFF;
5812 break;
5813 default:
5814 break;
5815 }
5816
5817 if (fs->flow_type & FLOW_EXT) {
5818 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5819 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5820 }
5821
5822 if (fs->flow_type & FLOW_MAC_EXT) {
5823 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5824 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5825 }
5826
5827 return 0;
5828 }
5829
5830 /* must be called with fd_rule_lock held */
5831 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5832 struct hclge_fd_rule *rule)
5833 {
5834 int ret;
5835
5836 if (!rule) {
5837 dev_err(&hdev->pdev->dev,
5838 "The flow director rule is NULL\n");
5839 return -EINVAL;
5840 }
5841
5842 	/* it never fails here, so there is no need to check the return value */
5843 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5844
5845 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5846 if (ret)
5847 goto clear_rule;
5848
5849 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5850 if (ret)
5851 goto clear_rule;
5852
5853 return 0;
5854
5855 clear_rule:
5856 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5857 return ret;
5858 }
5859
5860 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5861 struct ethtool_rxnfc *cmd)
5862 {
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5865 u16 dst_vport_id = 0, q_index = 0;
5866 struct ethtool_rx_flow_spec *fs;
5867 struct hclge_fd_rule *rule;
5868 u32 unused = 0;
5869 u8 action;
5870 int ret;
5871
5872 if (!hnae3_dev_fd_supported(hdev)) {
5873 dev_err(&hdev->pdev->dev,
5874 			"flow director is not supported\n");
5875 return -EOPNOTSUPP;
5876 }
5877
5878 if (!hdev->fd_en) {
5879 dev_err(&hdev->pdev->dev,
5880 "please enable flow director first\n");
5881 return -EOPNOTSUPP;
5882 }
5883
5884 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5885
5886 ret = hclge_fd_check_spec(hdev, fs, &unused);
5887 if (ret)
5888 return ret;
5889
5890 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5891 action = HCLGE_FD_ACTION_DROP_PACKET;
5892 } else {
5893 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5894 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5895 u16 tqps;
5896
5897 if (vf > hdev->num_req_vfs) {
5898 dev_err(&hdev->pdev->dev,
5899 "Error: vf id (%u) > max vf num (%u)\n",
5900 vf, hdev->num_req_vfs);
5901 return -EINVAL;
5902 }
5903
5904 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5905 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5906
5907 if (ring >= tqps) {
5908 dev_err(&hdev->pdev->dev,
5909 "Error: queue id (%u) > max tqp num (%u)\n",
5910 ring, tqps - 1);
5911 return -EINVAL;
5912 }
5913
5914 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5915 q_index = ring;
5916 }
5917
5918 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5919 if (!rule)
5920 return -ENOMEM;
5921
5922 ret = hclge_fd_get_tuple(hdev, fs, rule);
5923 if (ret) {
5924 kfree(rule);
5925 return ret;
5926 }
5927
5928 rule->flow_type = fs->flow_type;
5929 rule->location = fs->location;
5930 rule->unused_tuple = unused;
5931 rule->vf_id = dst_vport_id;
5932 rule->queue_id = q_index;
5933 rule->action = action;
5934 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5935
5936 	/* to avoid rule conflicts, when the user configures a rule by ethtool,
5937 * we need to clear all arfs rules
5938 */
5939 spin_lock_bh(&hdev->fd_rule_lock);
5940 hclge_clear_arfs_rules(handle);
5941
5942 ret = hclge_fd_config_rule(hdev, rule);
5943
5944 spin_unlock_bh(&hdev->fd_rule_lock);
5945
5946 return ret;
5947 }
5948
5949 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5950 struct ethtool_rxnfc *cmd)
5951 {
5952 struct hclge_vport *vport = hclge_get_vport(handle);
5953 struct hclge_dev *hdev = vport->back;
5954 struct ethtool_rx_flow_spec *fs;
5955 int ret;
5956
5957 if (!hnae3_dev_fd_supported(hdev))
5958 return -EOPNOTSUPP;
5959
5960 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5961
5962 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5963 return -EINVAL;
5964
5965 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5966 dev_err(&hdev->pdev->dev,
5967 			"Delete fail, rule %u does not exist\n", fs->location);
5968 return -ENOENT;
5969 }
5970
5971 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5972 NULL, false);
5973 if (ret)
5974 return ret;
5975
5976 spin_lock_bh(&hdev->fd_rule_lock);
5977 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5978
5979 spin_unlock_bh(&hdev->fd_rule_lock);
5980
5981 return ret;
5982 }
5983
5984 /* must be called with fd_rule_lock held */
5985 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5986 bool clear_list)
5987 {
5988 struct hclge_vport *vport = hclge_get_vport(handle);
5989 struct hclge_dev *hdev = vport->back;
5990 struct hclge_fd_rule *rule;
5991 struct hlist_node *node;
5992 u16 location;
5993
5994 if (!hnae3_dev_fd_supported(hdev))
5995 return;
5996
5997 for_each_set_bit(location, hdev->fd_bmap,
5998 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5999 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6000 NULL, false);
6001
6002 if (clear_list) {
6003 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6004 rule_node) {
6005 hlist_del(&rule->rule_node);
6006 kfree(rule);
6007 }
6008 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6009 hdev->hclge_fd_rule_num = 0;
6010 bitmap_zero(hdev->fd_bmap,
6011 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6012 }
6013 }
6014
6015 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6016 {
6017 struct hclge_vport *vport = hclge_get_vport(handle);
6018 struct hclge_dev *hdev = vport->back;
6019 struct hclge_fd_rule *rule;
6020 struct hlist_node *node;
6021 int ret;
6022
6023 /* Return ok here, because reset error handling will check this
6024 * return value. If error is returned here, the reset process will
6025 * fail.
6026 */
6027 if (!hnae3_dev_fd_supported(hdev))
6028 return 0;
6029
6030 	/* if fd is disabled, it should not be restored during reset */
6031 if (!hdev->fd_en)
6032 return 0;
6033
6034 spin_lock_bh(&hdev->fd_rule_lock);
6035 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6036 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6037 if (!ret)
6038 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6039
6040 if (ret) {
6041 dev_warn(&hdev->pdev->dev,
6042 "Restore rule %u failed, remove it\n",
6043 rule->location);
6044 clear_bit(rule->location, hdev->fd_bmap);
6045 hlist_del(&rule->rule_node);
6046 kfree(rule);
6047 hdev->hclge_fd_rule_num--;
6048 }
6049 }
6050
6051 if (hdev->hclge_fd_rule_num)
6052 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6053
6054 spin_unlock_bh(&hdev->fd_rule_lock);
6055
6056 return 0;
6057 }
6058
6059 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6060 struct ethtool_rxnfc *cmd)
6061 {
6062 struct hclge_vport *vport = hclge_get_vport(handle);
6063 struct hclge_dev *hdev = vport->back;
6064
6065 if (!hnae3_dev_fd_supported(hdev))
6066 return -EOPNOTSUPP;
6067
6068 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6069 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070
6071 return 0;
6072 }
6073
6074 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6075 struct ethtool_tcpip4_spec *spec,
6076 struct ethtool_tcpip4_spec *spec_mask)
6077 {
6078 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6079 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6080 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6081
6082 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6083 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6084 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6085
6086 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6087 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6088 0 : cpu_to_be16(rule->tuples_mask.src_port);
6089
6090 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6091 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6092 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6093
6094 spec->tos = rule->tuples.ip_tos;
6095 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6096 0 : rule->tuples_mask.ip_tos;
6097 }
6098
6099 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6100 struct ethtool_usrip4_spec *spec,
6101 struct ethtool_usrip4_spec *spec_mask)
6102 {
6103 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6104 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6105 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6106
6107 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6108 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6109 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6110
6111 spec->tos = rule->tuples.ip_tos;
6112 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6113 0 : rule->tuples_mask.ip_tos;
6114
6115 spec->proto = rule->tuples.ip_proto;
6116 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6117 0 : rule->tuples_mask.ip_proto;
6118
6119 spec->ip_ver = ETH_RX_NFC_IP4;
6120 }
6121
6122 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6123 struct ethtool_tcpip6_spec *spec,
6124 struct ethtool_tcpip6_spec *spec_mask)
6125 {
6126 cpu_to_be32_array(spec->ip6src,
6127 rule->tuples.src_ip, IPV6_SIZE);
6128 cpu_to_be32_array(spec->ip6dst,
6129 rule->tuples.dst_ip, IPV6_SIZE);
6130 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6131 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6132 else
6133 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6134 IPV6_SIZE);
6135
6136 if (rule->unused_tuple & BIT(INNER_DST_IP))
6137 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6138 else
6139 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6140 IPV6_SIZE);
6141
6142 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6143 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6144 0 : cpu_to_be16(rule->tuples_mask.src_port);
6145
6146 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6147 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6148 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6149 }
6150
6151 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6152 struct ethtool_usrip6_spec *spec,
6153 struct ethtool_usrip6_spec *spec_mask)
6154 {
6155 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6156 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6157 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6158 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6159 else
6160 cpu_to_be32_array(spec_mask->ip6src,
6161 rule->tuples_mask.src_ip, IPV6_SIZE);
6162
6163 if (rule->unused_tuple & BIT(INNER_DST_IP))
6164 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6165 else
6166 cpu_to_be32_array(spec_mask->ip6dst,
6167 rule->tuples_mask.dst_ip, IPV6_SIZE);
6168
6169 spec->l4_proto = rule->tuples.ip_proto;
6170 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6171 0 : rule->tuples_mask.ip_proto;
6172 }
6173
6174 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6175 struct ethhdr *spec,
6176 struct ethhdr *spec_mask)
6177 {
6178 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6179 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6180
6181 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6182 eth_zero_addr(spec_mask->h_source);
6183 else
6184 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6185
6186 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6187 eth_zero_addr(spec_mask->h_dest);
6188 else
6189 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6190
6191 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6192 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6193 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6194 }
6195
6196 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6197 struct hclge_fd_rule *rule)
6198 {
6199 if (fs->flow_type & FLOW_EXT) {
6200 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6201 fs->m_ext.vlan_tci =
6202 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6203 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6204 }
6205
6206 if (fs->flow_type & FLOW_MAC_EXT) {
6207 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6208 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6209 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6210 else
6211 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6212 rule->tuples_mask.dst_mac);
6213 }
6214 }
6215
6216 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6217 struct ethtool_rxnfc *cmd)
6218 {
6219 struct hclge_vport *vport = hclge_get_vport(handle);
6220 struct hclge_fd_rule *rule = NULL;
6221 struct hclge_dev *hdev = vport->back;
6222 struct ethtool_rx_flow_spec *fs;
6223 struct hlist_node *node2;
6224
6225 if (!hnae3_dev_fd_supported(hdev))
6226 return -EOPNOTSUPP;
6227
6228 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6229
6230 spin_lock_bh(&hdev->fd_rule_lock);
6231
6232 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6233 if (rule->location >= fs->location)
6234 break;
6235 }
6236
6237 if (!rule || fs->location != rule->location) {
6238 spin_unlock_bh(&hdev->fd_rule_lock);
6239
6240 return -ENOENT;
6241 }
6242
6243 fs->flow_type = rule->flow_type;
6244 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6245 case SCTP_V4_FLOW:
6246 case TCP_V4_FLOW:
6247 case UDP_V4_FLOW:
6248 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6249 &fs->m_u.tcp_ip4_spec);
6250 break;
6251 case IP_USER_FLOW:
6252 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6253 &fs->m_u.usr_ip4_spec);
6254 break;
6255 case SCTP_V6_FLOW:
6256 case TCP_V6_FLOW:
6257 case UDP_V6_FLOW:
6258 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6259 &fs->m_u.tcp_ip6_spec);
6260 break;
6261 case IPV6_USER_FLOW:
6262 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6263 &fs->m_u.usr_ip6_spec);
6264 break;
6265 	/* The flow type of the fd rule has been checked before adding it to the
6266 	 * rule list. As other flow types have been handled, it must be ETHER_FLOW
6267 * for the default case
6268 */
6269 default:
6270 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6271 &fs->m_u.ether_spec);
6272 break;
6273 }
6274
6275 hclge_fd_get_ext_info(fs, rule);
6276
6277 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6278 fs->ring_cookie = RX_CLS_FLOW_DISC;
6279 } else {
6280 u64 vf_id;
6281
6282 fs->ring_cookie = rule->queue_id;
6283 vf_id = rule->vf_id;
6284 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6285 fs->ring_cookie |= vf_id;
6286 }
6287
6288 spin_unlock_bh(&hdev->fd_rule_lock);
6289
6290 return 0;
6291 }
6292
6293 static int hclge_get_all_rules(struct hnae3_handle *handle,
6294 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6295 {
6296 struct hclge_vport *vport = hclge_get_vport(handle);
6297 struct hclge_dev *hdev = vport->back;
6298 struct hclge_fd_rule *rule;
6299 struct hlist_node *node2;
6300 int cnt = 0;
6301
6302 if (!hnae3_dev_fd_supported(hdev))
6303 return -EOPNOTSUPP;
6304
6305 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6306
6307 spin_lock_bh(&hdev->fd_rule_lock);
6308 hlist_for_each_entry_safe(rule, node2,
6309 &hdev->fd_rule_list, rule_node) {
6310 if (cnt == cmd->rule_cnt) {
6311 spin_unlock_bh(&hdev->fd_rule_lock);
6312 return -EMSGSIZE;
6313 }
6314
6315 rule_locs[cnt] = rule->location;
6316 cnt++;
6317 }
6318
6319 spin_unlock_bh(&hdev->fd_rule_lock);
6320
6321 cmd->rule_cnt = cnt;
6322
6323 return 0;
6324 }
6325
6326 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6327 struct hclge_fd_rule_tuples *tuples)
6328 {
6329 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6330 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6331
6332 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6333 tuples->ip_proto = fkeys->basic.ip_proto;
6334 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6335
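	/* For IPv4, only the last u32 of the address arrays is used; for IPv6,
	 * all IPV6_SIZE words are converted to host byte order.
	 */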
6336 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6337 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6338 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6339 } else {
6340 int i;
6341
6342 for (i = 0; i < IPV6_SIZE; i++) {
6343 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6344 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6345 }
6346 }
6347 }
6348
6349 /* traverse all rules, check whether an existing rule has the same tuples */
6350 static struct hclge_fd_rule *
6351 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6352 const struct hclge_fd_rule_tuples *tuples)
6353 {
6354 struct hclge_fd_rule *rule = NULL;
6355 struct hlist_node *node;
6356
6357 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6358 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6359 return rule;
6360 }
6361
6362 return NULL;
6363 }
6364
6365 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6366 struct hclge_fd_rule *rule)
6367 {
6368 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6369 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6370 BIT(INNER_SRC_PORT);
6371 rule->action = 0;
6372 rule->vf_id = 0;
6373 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6374 if (tuples->ether_proto == ETH_P_IP) {
6375 if (tuples->ip_proto == IPPROTO_TCP)
6376 rule->flow_type = TCP_V4_FLOW;
6377 else
6378 rule->flow_type = UDP_V4_FLOW;
6379 } else {
6380 if (tuples->ip_proto == IPPROTO_TCP)
6381 rule->flow_type = TCP_V6_FLOW;
6382 else
6383 rule->flow_type = UDP_V6_FLOW;
6384 }
6385 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6386 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6387 }
6388
6389 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6390 u16 flow_id, struct flow_keys *fkeys)
6391 {
6392 struct hclge_vport *vport = hclge_get_vport(handle);
6393 struct hclge_fd_rule_tuples new_tuples = {};
6394 struct hclge_dev *hdev = vport->back;
6395 struct hclge_fd_rule *rule;
6396 u16 tmp_queue_id;
6397 u16 bit_id;
6398 int ret;
6399
6400 if (!hnae3_dev_fd_supported(hdev))
6401 return -EOPNOTSUPP;
6402
6403 	/* when there is already an fd rule added by the user,
6404 * arfs should not work
6405 */
6406 spin_lock_bh(&hdev->fd_rule_lock);
6407 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6408 spin_unlock_bh(&hdev->fd_rule_lock);
6409 return -EOPNOTSUPP;
6410 }
6411
6412 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6413
6414 	/* check whether a flow director filter already exists for this flow:
6415 	 * if not, create a new filter for it;
6416 	 * if a filter exists with a different queue id, modify the filter;
6417 	 * if a filter exists with the same queue id, do nothing
6418 */
6419 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6420 if (!rule) {
6421 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6422 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6423 spin_unlock_bh(&hdev->fd_rule_lock);
6424 return -ENOSPC;
6425 }
6426
6427 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6428 if (!rule) {
6429 spin_unlock_bh(&hdev->fd_rule_lock);
6430 return -ENOMEM;
6431 }
6432
6433 set_bit(bit_id, hdev->fd_bmap);
6434 rule->location = bit_id;
6435 rule->flow_id = flow_id;
6436 rule->queue_id = queue_id;
6437 hclge_fd_build_arfs_rule(&new_tuples, rule);
6438 ret = hclge_fd_config_rule(hdev, rule);
6439
6440 spin_unlock_bh(&hdev->fd_rule_lock);
6441
6442 if (ret)
6443 return ret;
6444
6445 return rule->location;
6446 }
6447
6448 spin_unlock_bh(&hdev->fd_rule_lock);
6449
6450 if (rule->queue_id == queue_id)
6451 return rule->location;
6452
6453 tmp_queue_id = rule->queue_id;
6454 rule->queue_id = queue_id;
6455 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6456 if (ret) {
6457 rule->queue_id = tmp_queue_id;
6458 return ret;
6459 }
6460
6461 return rule->location;
6462 }
6463
6464 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6465 {
6466 #ifdef CONFIG_RFS_ACCEL
6467 struct hnae3_handle *handle = &hdev->vport[0].nic;
6468 struct hclge_fd_rule *rule;
6469 struct hlist_node *node;
6470 HLIST_HEAD(del_list);
6471
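	/* Expired aRFS rules are first moved to a local list under the lock and
	 * then removed from the TCAM after the lock has been released.
	 */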
6472 spin_lock_bh(&hdev->fd_rule_lock);
6473 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6474 spin_unlock_bh(&hdev->fd_rule_lock);
6475 return;
6476 }
6477 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6478 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6479 rule->flow_id, rule->location)) {
6480 hlist_del_init(&rule->rule_node);
6481 hlist_add_head(&rule->rule_node, &del_list);
6482 hdev->hclge_fd_rule_num--;
6483 clear_bit(rule->location, hdev->fd_bmap);
6484 }
6485 }
6486 spin_unlock_bh(&hdev->fd_rule_lock);
6487
6488 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6489 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6490 rule->location, NULL, false);
6491 kfree(rule);
6492 }
6493 #endif
6494 }
6495
6496 /* must be called with fd_rule_lock held */
6497 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6498 {
6499 #ifdef CONFIG_RFS_ACCEL
6500 struct hclge_vport *vport = hclge_get_vport(handle);
6501 struct hclge_dev *hdev = vport->back;
6502
6503 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6504 hclge_del_all_fd_entries(handle, true);
6505 #endif
6506 }
6507
6508 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6509 {
6510 struct hclge_vport *vport = hclge_get_vport(handle);
6511 struct hclge_dev *hdev = vport->back;
6512
6513 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6514 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6515 }
6516
6517 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6518 {
6519 struct hclge_vport *vport = hclge_get_vport(handle);
6520 struct hclge_dev *hdev = vport->back;
6521
6522 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6523 }
6524
6525 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6526 {
6527 struct hclge_vport *vport = hclge_get_vport(handle);
6528 struct hclge_dev *hdev = vport->back;
6529
6530 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6531 }
6532
6533 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6534 {
6535 struct hclge_vport *vport = hclge_get_vport(handle);
6536 struct hclge_dev *hdev = vport->back;
6537
6538 return hdev->rst_stats.hw_reset_done_cnt;
6539 }
6540
6541 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6542 {
6543 struct hclge_vport *vport = hclge_get_vport(handle);
6544 struct hclge_dev *hdev = vport->back;
6545 bool clear;
6546
6547 hdev->fd_en = enable;
6548 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6549
6550 if (!enable) {
6551 spin_lock_bh(&hdev->fd_rule_lock);
6552 hclge_del_all_fd_entries(handle, clear);
6553 spin_unlock_bh(&hdev->fd_rule_lock);
6554 } else {
6555 hclge_restore_fd_entries(handle);
6556 }
6557 }
6558
6559 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6560 {
6561 struct hclge_desc desc;
6562 struct hclge_config_mac_mode_cmd *req =
6563 (struct hclge_config_mac_mode_cmd *)desc.data;
6564 u32 loop_en = 0;
6565 int ret;
6566
6567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6568
6569 if (enable) {
6570 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6571 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6572 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6573 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6574 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6575 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6576 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6577 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6578 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6579 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6580 }
6581
6582 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6583
6584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6585 if (ret)
6586 dev_err(&hdev->pdev->dev,
6587 "mac enable fail, ret =%d.\n", ret);
6588 }
6589
6590 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6591 u8 switch_param, u8 param_mask)
6592 {
6593 struct hclge_mac_vlan_switch_cmd *req;
6594 struct hclge_desc desc;
6595 u32 func_id;
6596 int ret;
6597
6598 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6599 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6600
6601 /* read current config parameter */
6602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6603 true);
6604 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6605 req->func_id = cpu_to_le32(func_id);
6606
6607 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6608 if (ret) {
6609 dev_err(&hdev->pdev->dev,
6610 "read mac vlan switch parameter fail, ret = %d\n", ret);
6611 return ret;
6612 }
6613
6614 /* modify and write new config parameter */
6615 hclge_cmd_reuse_desc(&desc, false);
6616 req->switch_param = (req->switch_param & param_mask) | switch_param;
6617 req->param_mask = param_mask;
6618
6619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6620 if (ret)
6621 dev_err(&hdev->pdev->dev,
6622 "set mac vlan switch parameter fail, ret = %d\n", ret);
6623 return ret;
6624 }
6625
6626 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6627 int link_ret)
6628 {
6629 #define HCLGE_PHY_LINK_STATUS_NUM 200
6630
6631 struct phy_device *phydev = hdev->hw.mac.phydev;
6632 int i = 0;
6633 int ret;
6634
6635 do {
6636 ret = phy_read_status(phydev);
6637 if (ret) {
6638 dev_err(&hdev->pdev->dev,
6639 "phy update link status fail, ret = %d\n", ret);
6640 return;
6641 }
6642
6643 if (phydev->link == link_ret)
6644 break;
6645
6646 msleep(HCLGE_LINK_STATUS_MS);
6647 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6648 }
6649
6650 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6651 {
6652 #define HCLGE_MAC_LINK_STATUS_NUM 100
6653
6654 int link_status;
6655 int i = 0;
6656 int ret;
6657
6658 do {
6659 ret = hclge_get_mac_link_status(hdev, &link_status);
6660 if (ret)
6661 return ret;
6662 if (link_status == link_ret)
6663 return 0;
6664
6665 msleep(HCLGE_LINK_STATUS_MS);
6666 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6667 return -EBUSY;
6668 }
6669
6670 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6671 bool is_phy)
6672 {
6673 int link_ret;
6674
6675 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6676
6677 if (is_phy)
6678 hclge_phy_link_status_wait(hdev, link_ret);
6679
6680 return hclge_mac_link_status_wait(hdev, link_ret);
6681 }
6682
6683 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6684 {
6685 struct hclge_config_mac_mode_cmd *req;
6686 struct hclge_desc desc;
6687 u32 loop_en;
6688 int ret;
6689
6690 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6691 /* 1 Read out the MAC mode config at first */
6692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6693 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6694 if (ret) {
6695 dev_err(&hdev->pdev->dev,
6696 "mac loopback get fail, ret =%d.\n", ret);
6697 return ret;
6698 }
6699
6700 /* 2 Then setup the loopback flag */
6701 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6702 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6703
6704 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6705
6706 /* 3 Config mac work mode with loopback flag
6707 	 * and its original configured parameters
6708 */
6709 hclge_cmd_reuse_desc(&desc, false);
6710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6711 if (ret)
6712 dev_err(&hdev->pdev->dev,
6713 "mac loopback set fail, ret =%d.\n", ret);
6714 return ret;
6715 }
6716
6717 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6718 enum hnae3_loop loop_mode)
6719 {
6720 #define HCLGE_SERDES_RETRY_MS 10
6721 #define HCLGE_SERDES_RETRY_NUM 100
6722
6723 struct hclge_serdes_lb_cmd *req;
6724 struct hclge_desc desc;
6725 int ret, i = 0;
6726 u8 loop_mode_b;
6727
6728 req = (struct hclge_serdes_lb_cmd *)desc.data;
6729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6730
6731 switch (loop_mode) {
6732 case HNAE3_LOOP_SERIAL_SERDES:
6733 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6734 break;
6735 case HNAE3_LOOP_PARALLEL_SERDES:
6736 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6737 break;
6738 default:
6739 dev_err(&hdev->pdev->dev,
6740 "unsupported serdes loopback mode %d\n", loop_mode);
6741 return -ENOTSUPP;
6742 }
6743
6744 if (en) {
6745 req->enable = loop_mode_b;
6746 req->mask = loop_mode_b;
6747 } else {
6748 req->mask = loop_mode_b;
6749 }
6750
6751 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6752 if (ret) {
6753 dev_err(&hdev->pdev->dev,
6754 "serdes loopback set fail, ret = %d\n", ret);
6755 return ret;
6756 }
6757
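	/* Poll the command result until the firmware reports the loopback
	 * configuration is done or the retry limit is reached.
	 */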
6758 do {
6759 msleep(HCLGE_SERDES_RETRY_MS);
6760 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6761 true);
6762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6763 if (ret) {
6764 dev_err(&hdev->pdev->dev,
6765 				"serdes loopback get fail, ret = %d\n", ret);
6766 return ret;
6767 }
6768 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6769 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6770
6771 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6772 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6773 return -EBUSY;
6774 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6775 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6776 return -EIO;
6777 }
6778 return ret;
6779 }
6780
6781 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6782 enum hnae3_loop loop_mode)
6783 {
6784 int ret;
6785
6786 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6787 if (ret)
6788 return ret;
6789
6790 hclge_cfg_mac_mode(hdev, en);
6791
6792 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6793 if (ret)
6794 dev_err(&hdev->pdev->dev,
6795 "serdes loopback config mac mode timeout\n");
6796
6797 return ret;
6798 }
6799
6800 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6801 struct phy_device *phydev)
6802 {
6803 int ret;
6804
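	/* Suspend the phy if it is running, then resume it before enabling
	 * loopback.
	 */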
6805 if (!phydev->suspended) {
6806 ret = phy_suspend(phydev);
6807 if (ret)
6808 return ret;
6809 }
6810
6811 ret = phy_resume(phydev);
6812 if (ret)
6813 return ret;
6814
6815 return phy_loopback(phydev, true);
6816 }
6817
6818 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6819 struct phy_device *phydev)
6820 {
6821 int ret;
6822
6823 ret = phy_loopback(phydev, false);
6824 if (ret)
6825 return ret;
6826
6827 return phy_suspend(phydev);
6828 }
6829
6830 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6831 {
6832 struct phy_device *phydev = hdev->hw.mac.phydev;
6833 int ret;
6834
6835 if (!phydev)
6836 return -ENOTSUPP;
6837
6838 if (en)
6839 ret = hclge_enable_phy_loopback(hdev, phydev);
6840 else
6841 ret = hclge_disable_phy_loopback(hdev, phydev);
6842 if (ret) {
6843 dev_err(&hdev->pdev->dev,
6844 "set phy loopback fail, ret = %d\n", ret);
6845 return ret;
6846 }
6847
6848 hclge_cfg_mac_mode(hdev, en);
6849
6850 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6851 if (ret)
6852 dev_err(&hdev->pdev->dev,
6853 "phy loopback config mac mode timeout\n");
6854
6855 return ret;
6856 }
6857
6858 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6859 int stream_id, bool enable)
6860 {
6861 struct hclge_desc desc;
6862 struct hclge_cfg_com_tqp_queue_cmd *req =
6863 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6864 int ret;
6865
6866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6867 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6868 req->stream_id = cpu_to_le16(stream_id);
6869 if (enable)
6870 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6871
6872 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6873 if (ret)
6874 dev_err(&hdev->pdev->dev,
6875 "Tqp enable fail, status =%d.\n", ret);
6876 return ret;
6877 }
6878
6879 static int hclge_set_loopback(struct hnae3_handle *handle,
6880 enum hnae3_loop loop_mode, bool en)
6881 {
6882 struct hclge_vport *vport = hclge_get_vport(handle);
6883 struct hnae3_knic_private_info *kinfo;
6884 struct hclge_dev *hdev = vport->back;
6885 int i, ret;
6886
6887 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6888 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6889 * the same, the packets are looped back in the SSU. If SSU loopback
6890 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6891 */
6892 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6893 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6894
6895 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6896 HCLGE_SWITCH_ALW_LPBK_MASK);
6897 if (ret)
6898 return ret;
6899 }
6900
6901 switch (loop_mode) {
6902 case HNAE3_LOOP_APP:
6903 ret = hclge_set_app_loopback(hdev, en);
6904 break;
6905 case HNAE3_LOOP_SERIAL_SERDES:
6906 case HNAE3_LOOP_PARALLEL_SERDES:
6907 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6908 break;
6909 case HNAE3_LOOP_PHY:
6910 ret = hclge_set_phy_loopback(hdev, en);
6911 break;
6912 default:
6913 ret = -ENOTSUPP;
6914 dev_err(&hdev->pdev->dev,
6915 "loop_mode %d is not supported\n", loop_mode);
6916 break;
6917 }
6918
6919 if (ret)
6920 return ret;
6921
6922 kinfo = &vport->nic.kinfo;
6923 for (i = 0; i < kinfo->num_tqps; i++) {
6924 ret = hclge_tqp_enable(hdev, i, 0, en);
6925 if (ret)
6926 return ret;
6927 }
6928
6929 return 0;
6930 }
6931
6932 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6933 {
6934 int ret;
6935
6936 ret = hclge_set_app_loopback(hdev, false);
6937 if (ret)
6938 return ret;
6939
6940 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6941 if (ret)
6942 return ret;
6943
6944 return hclge_cfg_serdes_loopback(hdev, false,
6945 HNAE3_LOOP_PARALLEL_SERDES);
6946 }
6947
6948 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6949 {
6950 struct hclge_vport *vport = hclge_get_vport(handle);
6951 struct hnae3_knic_private_info *kinfo;
6952 struct hnae3_queue *queue;
6953 struct hclge_tqp *tqp;
6954 int i;
6955
6956 kinfo = &vport->nic.kinfo;
6957 for (i = 0; i < kinfo->num_tqps; i++) {
6958 queue = handle->kinfo.tqp[i];
6959 tqp = container_of(queue, struct hclge_tqp, q);
6960 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6961 }
6962 }
6963
6964 static void hclge_flush_link_update(struct hclge_dev *hdev)
6965 {
6966 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6967
6968 unsigned long last = hdev->serv_processed_cnt;
6969 int i = 0;
6970
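	/* Busy-wait until any in-flight link update in the service task has
	 * finished, bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations.
	 */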
6971 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6972 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6973 last == hdev->serv_processed_cnt)
6974 usleep_range(1, 1);
6975 }
6976
6977 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6978 {
6979 struct hclge_vport *vport = hclge_get_vport(handle);
6980 struct hclge_dev *hdev = vport->back;
6981
6982 if (enable) {
6983 hclge_task_schedule(hdev, 0);
6984 } else {
6985 /* Set the DOWN flag here to disable link updating */
6986 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6987
6988 /* flush memory to make sure DOWN is seen by service task */
6989 smp_mb__before_atomic();
6990 hclge_flush_link_update(hdev);
6991 }
6992 }
6993
6994 static int hclge_ae_start(struct hnae3_handle *handle)
6995 {
6996 struct hclge_vport *vport = hclge_get_vport(handle);
6997 struct hclge_dev *hdev = vport->back;
6998
6999 /* mac enable */
7000 hclge_cfg_mac_mode(hdev, true);
7001 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7002 hdev->hw.mac.link = 0;
7003
7004 /* reset tqp stats */
7005 hclge_reset_tqp_stats(handle);
7006
7007 hclge_mac_start_phy(hdev);
7008
7009 return 0;
7010 }
7011
7012 static void hclge_ae_stop(struct hnae3_handle *handle)
7013 {
7014 struct hclge_vport *vport = hclge_get_vport(handle);
7015 struct hclge_dev *hdev = vport->back;
7016 int i;
7017
7018 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7019 spin_lock_bh(&hdev->fd_rule_lock);
7020 hclge_clear_arfs_rules(handle);
7021 spin_unlock_bh(&hdev->fd_rule_lock);
7022
7023 /* If this is not a PF reset or FLR, the firmware will disable the MAC,
7024 * so it only needs to stop the PHY here.
7025 */
7026 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7027 hdev->reset_type != HNAE3_FUNC_RESET &&
7028 hdev->reset_type != HNAE3_FLR_RESET) {
7029 hclge_mac_stop_phy(hdev);
7030 hclge_update_link_status(hdev);
7031 return;
7032 }
7033
7034 for (i = 0; i < handle->kinfo.num_tqps; i++)
7035 hclge_reset_tqp(handle, i);
7036
7037 hclge_config_mac_tnl_int(hdev, false);
7038
7039 /* Mac disable */
7040 hclge_cfg_mac_mode(hdev, false);
7041
7042 hclge_mac_stop_phy(hdev);
7043
7044 /* reset tqp stats */
7045 hclge_reset_tqp_stats(handle);
7046 hclge_update_link_status(hdev);
7047 }
7048
7049 int hclge_vport_start(struct hclge_vport *vport)
7050 {
7051 struct hclge_dev *hdev = vport->back;
7052
7053 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7054 vport->last_active_jiffies = jiffies;
7055
7056 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7057 if (vport->vport_id) {
7058 hclge_restore_mac_table_common(vport);
7059 hclge_restore_vport_vlan_table(vport);
7060 } else {
7061 hclge_restore_hw_table(hdev);
7062 }
7063 }
7064
7065 clear_bit(vport->vport_id, hdev->vport_config_block);
7066
7067 return 0;
7068 }
7069
7070 void hclge_vport_stop(struct hclge_vport *vport)
7071 {
7072 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7073 }
7074
7075 static int hclge_client_start(struct hnae3_handle *handle)
7076 {
7077 struct hclge_vport *vport = hclge_get_vport(handle);
7078
7079 return hclge_vport_start(vport);
7080 }
7081
7082 static void hclge_client_stop(struct hnae3_handle *handle)
7083 {
7084 struct hclge_vport *vport = hclge_get_vport(handle);
7085
7086 hclge_vport_stop(vport);
7087 }
7088
7089 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7090 u16 cmdq_resp, u8 resp_code,
7091 enum hclge_mac_vlan_tbl_opcode op)
7092 {
7093 struct hclge_dev *hdev = vport->back;
7094
7095 if (cmdq_resp) {
7096 dev_err(&hdev->pdev->dev,
7097 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7098 cmdq_resp);
7099 return -EIO;
7100 }
7101
7102 if (op == HCLGE_MAC_VLAN_ADD) {
7103 if (!resp_code || resp_code == 1)
7104 return 0;
7105 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7106 resp_code == HCLGE_ADD_MC_OVERFLOW)
7107 return -ENOSPC;
7108
7109 dev_err(&hdev->pdev->dev,
7110 "add mac addr failed for undefined, code=%u.\n",
7111 resp_code);
7112 return -EIO;
7113 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7114 if (!resp_code) {
7115 return 0;
7116 } else if (resp_code == 1) {
7117 dev_dbg(&hdev->pdev->dev,
7118 "remove mac addr failed for miss.\n");
7119 return -ENOENT;
7120 }
7121
7122 dev_err(&hdev->pdev->dev,
7123 "remove mac addr failed for undefined, code=%u.\n",
7124 resp_code);
7125 return -EIO;
7126 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7127 if (!resp_code) {
7128 return 0;
7129 } else if (resp_code == 1) {
7130 dev_dbg(&hdev->pdev->dev,
7131 "lookup mac addr failed for miss.\n");
7132 return -ENOENT;
7133 }
7134
7135 dev_err(&hdev->pdev->dev,
7136 "lookup mac addr failed for undefined, code=%u.\n",
7137 resp_code);
7138 return -EIO;
7139 }
7140
7141 dev_err(&hdev->pdev->dev,
7142 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7143
7144 return -EINVAL;
7145 }
7146
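/* Set or clear the bit for @vfid in the per-function bitmap carried by the
 * MAC-VLAN table descriptors: the first 192 function ids live in desc[1]
 * (32-bit words, bit = vfid % 32), the remaining ids in desc[2].
 * For example, vfid 200 maps to desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */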
7147 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7148 {
7149 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7150
7151 unsigned int word_num;
7152 unsigned int bit_num;
7153
7154 if (vfid > 255 || vfid < 0)
7155 return -EIO;
7156
7157 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7158 word_num = vfid / 32;
7159 bit_num = vfid % 32;
7160 if (clr)
7161 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7162 else
7163 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7164 } else {
7165 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7166 bit_num = vfid % 32;
7167 if (clr)
7168 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7169 else
7170 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7171 }
7172
7173 return 0;
7174 }
7175
7176 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7177 {
7178 #define HCLGE_DESC_NUMBER 3
7179 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7180 int i, j;
7181
7182 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7183 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7184 if (desc[i].data[j])
7185 return false;
7186
7187 return true;
7188 }
7189
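/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, e.g. 00:11:22:33:44:55
 * becomes hi32 = 0x33221100 and lo16 = 0x5544 before the cpu_to_le
 * conversion. For multicast entries the corresponding entry_type and
 * mc_mac_en bits are also set.
 */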
7190 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7191 const u8 *addr, bool is_mc)
7192 {
7193 const unsigned char *mac_addr = addr;
7194 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7195 (mac_addr[0]) | (mac_addr[1] << 8);
7196 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7197
7198 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7199 if (is_mc) {
7200 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7201 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7202 }
7203
7204 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7205 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7206 }
7207
7208 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7209 struct hclge_mac_vlan_tbl_entry_cmd *req)
7210 {
7211 struct hclge_dev *hdev = vport->back;
7212 struct hclge_desc desc;
7213 u8 resp_code;
7214 u16 retval;
7215 int ret;
7216
7217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7218
7219 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7220
7221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7222 if (ret) {
7223 dev_err(&hdev->pdev->dev,
7224 "del mac addr failed for cmd_send, ret =%d.\n",
7225 ret);
7226 return ret;
7227 }
7228 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7229 retval = le16_to_cpu(desc.retval);
7230
7231 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7232 HCLGE_MAC_VLAN_REMOVE);
7233 }
7234
7235 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7236 struct hclge_mac_vlan_tbl_entry_cmd *req,
7237 struct hclge_desc *desc,
7238 bool is_mc)
7239 {
7240 struct hclge_dev *hdev = vport->back;
7241 u8 resp_code;
7242 u16 retval;
7243 int ret;
7244
7245 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7246 if (is_mc) {
7247 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7248 memcpy(desc[0].data,
7249 req,
7250 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7251 hclge_cmd_setup_basic_desc(&desc[1],
7252 HCLGE_OPC_MAC_VLAN_ADD,
7253 true);
7254 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7255 hclge_cmd_setup_basic_desc(&desc[2],
7256 HCLGE_OPC_MAC_VLAN_ADD,
7257 true);
7258 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7259 } else {
7260 memcpy(desc[0].data,
7261 req,
7262 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7263 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7264 }
7265 if (ret) {
7266 dev_err(&hdev->pdev->dev,
7267 "lookup mac addr failed for cmd_send, ret =%d.\n",
7268 ret);
7269 return ret;
7270 }
7271 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7272 retval = le16_to_cpu(desc[0].retval);
7273
7274 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7275 HCLGE_MAC_VLAN_LKUP);
7276 }
7277
7278 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7279 struct hclge_mac_vlan_tbl_entry_cmd *req,
7280 struct hclge_desc *mc_desc)
7281 {
7282 struct hclge_dev *hdev = vport->back;
7283 int cfg_status;
7284 u8 resp_code;
7285 u16 retval;
7286 int ret;
7287
7288 if (!mc_desc) {
7289 struct hclge_desc desc;
7290
7291 hclge_cmd_setup_basic_desc(&desc,
7292 HCLGE_OPC_MAC_VLAN_ADD,
7293 false);
7294 memcpy(desc.data, req,
7295 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7296 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7297 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7298 retval = le16_to_cpu(desc.retval);
7299
7300 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7301 resp_code,
7302 HCLGE_MAC_VLAN_ADD);
7303 } else {
7304 hclge_cmd_reuse_desc(&mc_desc[0], false);
7305 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7306 hclge_cmd_reuse_desc(&mc_desc[1], false);
7307 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7308 hclge_cmd_reuse_desc(&mc_desc[2], false);
7309 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7310 memcpy(mc_desc[0].data, req,
7311 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7312 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7313 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7314 retval = le16_to_cpu(mc_desc[0].retval);
7315
7316 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7317 resp_code,
7318 HCLGE_MAC_VLAN_ADD);
7319 }
7320
7321 if (ret) {
7322 dev_err(&hdev->pdev->dev,
7323 "add mac addr failed for cmd_send, ret =%d.\n",
7324 ret);
7325 return ret;
7326 }
7327
7328 return cfg_status;
7329 }
7330
7331 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7332 u16 *allocated_size)
7333 {
7334 struct hclge_umv_spc_alc_cmd *req;
7335 struct hclge_desc desc;
7336 int ret;
7337
7338 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7340
7341 req->space_size = cpu_to_le32(space_size);
7342
7343 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7344 if (ret) {
7345 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7346 ret);
7347 return ret;
7348 }
7349
7350 *allocated_size = le32_to_cpu(desc.data[1]);
7351
7352 return 0;
7353 }
7354
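/* Request the wanted unicast MAC-VLAN (UMV) space from firmware and split
 * whatever was actually granted into num_alloc_vport + 1 equal shares:
 * each vport gets one private share, and the extra share plus the
 * division remainder form the shared pool.
 */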
7355 static int hclge_init_umv_space(struct hclge_dev *hdev)
7356 {
7357 u16 allocated_size = 0;
7358 int ret;
7359
7360 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7361 if (ret)
7362 return ret;
7363
7364 if (allocated_size < hdev->wanted_umv_size)
7365 dev_warn(&hdev->pdev->dev,
7366 "failed to alloc umv space, want %u, get %u\n",
7367 hdev->wanted_umv_size, allocated_size);
7368
7369 hdev->max_umv_size = allocated_size;
7370 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7371 hdev->share_umv_size = hdev->priv_umv_size +
7372 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7373
7374 return 0;
7375 }
7376
7377 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7378 {
7379 struct hclge_vport *vport;
7380 int i;
7381
7382 for (i = 0; i < hdev->num_alloc_vport; i++) {
7383 vport = &hdev->vport[i];
7384 vport->used_umv_num = 0;
7385 }
7386
7387 mutex_lock(&hdev->vport_lock);
7388 hdev->share_umv_size = hdev->priv_umv_size +
7389 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7390 mutex_unlock(&hdev->vport_lock);
7391 }
7392
7393 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7394 {
7395 struct hclge_dev *hdev = vport->back;
7396 bool is_full;
7397
7398 if (need_lock)
7399 mutex_lock(&hdev->vport_lock);
7400
7401 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7402 hdev->share_umv_size == 0);
7403
7404 if (need_lock)
7405 mutex_unlock(&hdev->vport_lock);
7406
7407 return is_full;
7408 }
7409
7410 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7411 {
7412 struct hclge_dev *hdev = vport->back;
7413
7414 if (is_free) {
7415 if (vport->used_umv_num > hdev->priv_umv_size)
7416 hdev->share_umv_size++;
7417
7418 if (vport->used_umv_num > 0)
7419 vport->used_umv_num--;
7420 } else {
7421 if (vport->used_umv_num >= hdev->priv_umv_size &&
7422 hdev->share_umv_size > 0)
7423 hdev->share_umv_size--;
7424 vport->used_umv_num++;
7425 }
7426 }
7427
7428 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7429 const u8 *mac_addr)
7430 {
7431 struct hclge_mac_node *mac_node, *tmp;
7432
7433 list_for_each_entry_safe(mac_node, tmp, list, node)
7434 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7435 return mac_node;
7436
7437 return NULL;
7438 }
7439
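/* Merge a new request into an existing mac node. The resulting state is:
 *   node TO_DEL + request TO_ADD  -> ACTIVE
 *   node TO_ADD + request TO_DEL  -> node freed (never written to hw)
 *   node ACTIVE + request TO_DEL  -> TO_DEL
 *   node TO_ADD + request ACTIVE  -> ACTIVE
 * Any other combination leaves the node unchanged.
 */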
7440 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7441 enum HCLGE_MAC_NODE_STATE state)
7442 {
7443 switch (state) {
7444 /* from set_rx_mode or tmp_add_list */
7445 case HCLGE_MAC_TO_ADD:
7446 if (mac_node->state == HCLGE_MAC_TO_DEL)
7447 mac_node->state = HCLGE_MAC_ACTIVE;
7448 break;
7449 /* only from set_rx_mode */
7450 case HCLGE_MAC_TO_DEL:
7451 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7452 list_del(&mac_node->node);
7453 kfree(mac_node);
7454 } else {
7455 mac_node->state = HCLGE_MAC_TO_DEL;
7456 }
7457 break;
7458 /* only from tmp_add_list, the mac_node->state won't be
7459 * ACTIVE.
7460 */
7461 case HCLGE_MAC_ACTIVE:
7462 if (mac_node->state == HCLGE_MAC_TO_ADD)
7463 mac_node->state = HCLGE_MAC_ACTIVE;
7464
7465 break;
7466 }
7467 }
7468
7469 int hclge_update_mac_list(struct hclge_vport *vport,
7470 enum HCLGE_MAC_NODE_STATE state,
7471 enum HCLGE_MAC_ADDR_TYPE mac_type,
7472 const unsigned char *addr)
7473 {
7474 struct hclge_dev *hdev = vport->back;
7475 struct hclge_mac_node *mac_node;
7476 struct list_head *list;
7477
7478 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479 &vport->uc_mac_list : &vport->mc_mac_list;
7480
7481 spin_lock_bh(&vport->mac_list_lock);
7482
7483 /* if the mac addr is already in the mac list, no need to add a new
7484 * one into it; just check the mac addr state and convert it to a new
7485 * state, or just remove it, or do nothing.
7486 */
7487 mac_node = hclge_find_mac_node(list, addr);
7488 if (mac_node) {
7489 hclge_update_mac_node(mac_node, state);
7490 spin_unlock_bh(&vport->mac_list_lock);
7491 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7492 return 0;
7493 }
7494
7495 /* if this address was never added, there is no need to delete it */
7496 if (state == HCLGE_MAC_TO_DEL) {
7497 spin_unlock_bh(&vport->mac_list_lock);
7498 dev_err(&hdev->pdev->dev,
7499 "failed to delete address %pM from mac list\n",
7500 addr);
7501 return -ENOENT;
7502 }
7503
7504 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7505 if (!mac_node) {
7506 spin_unlock_bh(&vport->mac_list_lock);
7507 return -ENOMEM;
7508 }
7509
7510 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7511
7512 mac_node->state = state;
7513 ether_addr_copy(mac_node->mac_addr, addr);
7514 list_add_tail(&mac_node->node, list);
7515
7516 spin_unlock_bh(&vport->mac_list_lock);
7517
7518 return 0;
7519 }
7520
7521 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7522 const unsigned char *addr)
7523 {
7524 struct hclge_vport *vport = hclge_get_vport(handle);
7525
7526 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7527 addr);
7528 }
7529
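/* Program a unicast address into the hardware MAC-VLAN table for @vport.
 * The address is first looked up: if it is absent (-ENOENT) it is added,
 * provided there is still UMV space; if it is already present the caller
 * gets -EEXIST; if the table is full the caller gets -ENOSPC.
 */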
7530 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7531 const unsigned char *addr)
7532 {
7533 struct hclge_dev *hdev = vport->back;
7534 struct hclge_mac_vlan_tbl_entry_cmd req;
7535 struct hclge_desc desc;
7536 u16 egress_port = 0;
7537 int ret;
7538
7539 /* mac addr check */
7540 if (is_zero_ether_addr(addr) ||
7541 is_broadcast_ether_addr(addr) ||
7542 is_multicast_ether_addr(addr)) {
7543 dev_err(&hdev->pdev->dev,
7544 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7545 addr, is_zero_ether_addr(addr),
7546 is_broadcast_ether_addr(addr),
7547 is_multicast_ether_addr(addr));
7548 return -EINVAL;
7549 }
7550
7551 memset(&req, 0, sizeof(req));
7552
7553 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7554 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7555
7556 req.egress_port = cpu_to_le16(egress_port);
7557
7558 hclge_prepare_mac_addr(&req, addr, false);
7559
7560 /* Lookup the mac address in the mac_vlan table, and add
7561 * it if the entry does not exist. Duplicate unicast entries
7562 * are not allowed in the mac vlan table.
7563 */
7564 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7565 if (ret == -ENOENT) {
7566 mutex_lock(&hdev->vport_lock);
7567 if (!hclge_is_umv_space_full(vport, false)) {
7568 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7569 if (!ret)
7570 hclge_update_umv_space(vport, false);
7571 mutex_unlock(&hdev->vport_lock);
7572 return ret;
7573 }
7574 mutex_unlock(&hdev->vport_lock);
7575
7576 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7577 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7578 hdev->priv_umv_size);
7579
7580 return -ENOSPC;
7581 }
7582
7583 /* check if we just hit the duplicate */
7584 if (!ret)
7585 return -EEXIST;
7586
7587 return ret;
7588 }
7589
7590 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7591 const unsigned char *addr)
7592 {
7593 struct hclge_vport *vport = hclge_get_vport(handle);
7594
7595 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7596 addr);
7597 }
7598
7599 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7600 const unsigned char *addr)
7601 {
7602 struct hclge_dev *hdev = vport->back;
7603 struct hclge_mac_vlan_tbl_entry_cmd req;
7604 int ret;
7605
7606 /* mac addr check */
7607 if (is_zero_ether_addr(addr) ||
7608 is_broadcast_ether_addr(addr) ||
7609 is_multicast_ether_addr(addr)) {
7610 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7611 addr);
7612 return -EINVAL;
7613 }
7614
7615 memset(&req, 0, sizeof(req));
7616 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7617 hclge_prepare_mac_addr(&req, addr, false);
7618 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7619 if (!ret) {
7620 mutex_lock(&hdev->vport_lock);
7621 hclge_update_umv_space(vport, true);
7622 mutex_unlock(&hdev->vport_lock);
7623 } else if (ret == -ENOENT) {
7624 ret = 0;
7625 }
7626
7627 return ret;
7628 }
7629
7630 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7631 const unsigned char *addr)
7632 {
7633 struct hclge_vport *vport = hclge_get_vport(handle);
7634
7635 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7636 addr);
7637 }
7638
7639 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7640 const unsigned char *addr)
7641 {
7642 struct hclge_dev *hdev = vport->back;
7643 struct hclge_mac_vlan_tbl_entry_cmd req;
7644 struct hclge_desc desc[3];
7645 int status;
7646
7647 /* mac addr check */
7648 if (!is_multicast_ether_addr(addr)) {
7649 dev_err(&hdev->pdev->dev,
7650 "Add mc mac err! invalid mac:%pM.\n",
7651 addr);
7652 return -EINVAL;
7653 }
7654 memset(&req, 0, sizeof(req));
7655 hclge_prepare_mac_addr(&req, addr, true);
7656 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7657 if (status) {
7658 /* This mac addr does not exist, add a new entry for it */
7659 memset(desc[0].data, 0, sizeof(desc[0].data));
7660 memset(desc[1].data, 0, sizeof(desc[0].data));
7661 memset(desc[2].data, 0, sizeof(desc[0].data));
7662 }
7663 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7664 if (status)
7665 return status;
7666 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7667
7668 /* if already overflowed, do not print each time */
7669 if (status == -ENOSPC &&
7670 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7671 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7672
7673 return status;
7674 }
7675
7676 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7677 const unsigned char *addr)
7678 {
7679 struct hclge_vport *vport = hclge_get_vport(handle);
7680
7681 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7682 addr);
7683 }
7684
7685 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7686 const unsigned char *addr)
7687 {
7688 struct hclge_dev *hdev = vport->back;
7689 struct hclge_mac_vlan_tbl_entry_cmd req;
7690 enum hclge_cmd_status status;
7691 struct hclge_desc desc[3];
7692
7693 /* mac addr check */
7694 if (!is_multicast_ether_addr(addr)) {
7695 dev_dbg(&hdev->pdev->dev,
7696 "Remove mc mac err! invalid mac:%pM.\n",
7697 addr);
7698 return -EINVAL;
7699 }
7700
7701 memset(&req, 0, sizeof(req));
7702 hclge_prepare_mac_addr(&req, addr, true);
7703 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7704 if (!status) {
7705 /* This mac addr exists, remove this handle's VFID for it */
7706 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7707 if (status)
7708 return status;
7709
7710 if (hclge_is_all_function_id_zero(desc))
7711 /* All the vfids are zero, so delete this entry */
7712 status = hclge_remove_mac_vlan_tbl(vport, &req);
7713 else
7714 /* Not all the vfids are zero, just update the vfid bitmap */
7715 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7716
7717 } else if (status == -ENOENT) {
7718 status = 0;
7719 }
7720
7721 return status;
7722 }
7723
7724 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7725 struct list_head *list,
7726 int (*sync)(struct hclge_vport *,
7727 const unsigned char *))
7728 {
7729 struct hclge_mac_node *mac_node, *tmp;
7730 int ret;
7731
7732 list_for_each_entry_safe(mac_node, tmp, list, node) {
7733 ret = sync(vport, mac_node->mac_addr);
7734 if (!ret) {
7735 mac_node->state = HCLGE_MAC_ACTIVE;
7736 } else {
7737 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7738 &vport->state);
7739
7740 /* If one unicast mac address already exists in hardware,
7741 * keep trying: the other unicast mac addresses may be new
7742 * addresses that can still be added.
7743 */
7744 if (ret != -EEXIST)
7745 break;
7746 }
7747 }
7748 }
7749
7750 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7751 struct list_head *list,
7752 int (*unsync)(struct hclge_vport *,
7753 const unsigned char *))
7754 {
7755 struct hclge_mac_node *mac_node, *tmp;
7756 int ret;
7757
7758 list_for_each_entry_safe(mac_node, tmp, list, node) {
7759 ret = unsync(vport, mac_node->mac_addr);
7760 if (!ret || ret == -ENOENT) {
7761 list_del(&mac_node->node);
7762 kfree(mac_node);
7763 } else {
7764 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7765 &vport->state);
7766 break;
7767 }
7768 }
7769 }
7770
7771 static bool hclge_sync_from_add_list(struct list_head *add_list,
7772 struct list_head *mac_list)
7773 {
7774 struct hclge_mac_node *mac_node, *tmp, *new_node;
7775 bool all_added = true;
7776
7777 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7778 if (mac_node->state == HCLGE_MAC_TO_ADD)
7779 all_added = false;
7780
7781 /* if the mac address from tmp_add_list is not in the
7782 * uc/mc_mac_list, a TO_DEL request was received during the
7783 * time window of adding the mac address into the mac table.
7784 * If the mac_node state is ACTIVE, change it to TO_DEL so it
7785 * will be removed next time; otherwise it must be TO_ADD,
7786 * meaning this address has not been added into the mac table
7787 * yet, so just remove the mac node.
7788 */
7789 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7790 if (new_node) {
7791 hclge_update_mac_node(new_node, mac_node->state);
7792 list_del(&mac_node->node);
7793 kfree(mac_node);
7794 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7795 mac_node->state = HCLGE_MAC_TO_DEL;
7796 list_del(&mac_node->node);
7797 list_add_tail(&mac_node->node, mac_list);
7798 } else {
7799 list_del(&mac_node->node);
7800 kfree(mac_node);
7801 }
7802 }
7803
7804 return all_added;
7805 }
7806
7807 static void hclge_sync_from_del_list(struct list_head *del_list,
7808 struct list_head *mac_list)
7809 {
7810 struct hclge_mac_node *mac_node, *tmp, *new_node;
7811
7812 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7813 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7814 if (new_node) {
7815 /* If the mac addr exists in the mac list, it means a new
7816 * TO_ADD request was received during the time window of
7817 * configuring the mac address. The mac node state is TO_ADD
7818 * and the address is already in the hardware (because the
7819 * delete failed), so we just need to change the mac node
7820 * state to ACTIVE.
7821 */
7822 new_node->state = HCLGE_MAC_ACTIVE;
7823 list_del(&mac_node->node);
7824 kfree(mac_node);
7825 } else {
7826 list_del(&mac_node->node);
7827 list_add_tail(&mac_node->node, mac_list);
7828 }
7829 }
7830 }
7831
7832 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7833 enum HCLGE_MAC_ADDR_TYPE mac_type,
7834 bool is_all_added)
7835 {
7836 if (mac_type == HCLGE_MAC_ADDR_UC) {
7837 if (is_all_added)
7838 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7839 else
7840 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7841 } else {
7842 if (is_all_added)
7843 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7844 else
7845 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7846 }
7847 }
7848
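/* Sync one vport's uc or mc address list to hardware in three steps:
 * 1) under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy
 *    TO_ADD nodes to tmp_add_list;
 * 2) without the lock, remove the tmp_del_list entries from hardware
 *    first (to free table space) and then add the tmp_add_list entries;
 * 3) under the lock again, merge both temporary lists back into the
 *    vport list and update the overflow promiscuous flags.
 */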
7849 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7850 enum HCLGE_MAC_ADDR_TYPE mac_type)
7851 {
7852 struct hclge_mac_node *mac_node, *tmp, *new_node;
7853 struct list_head tmp_add_list, tmp_del_list;
7854 struct list_head *list;
7855 bool all_added;
7856
7857 INIT_LIST_HEAD(&tmp_add_list);
7858 INIT_LIST_HEAD(&tmp_del_list);
7859
7860 /* move the mac addr to the tmp_add_list and tmp_del_list, then
7861 * we can add/delete these mac addr outside the spin lock
7862 */
7863 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7864 &vport->uc_mac_list : &vport->mc_mac_list;
7865
7866 spin_lock_bh(&vport->mac_list_lock);
7867
7868 list_for_each_entry_safe(mac_node, tmp, list, node) {
7869 switch (mac_node->state) {
7870 case HCLGE_MAC_TO_DEL:
7871 list_del(&mac_node->node);
7872 list_add_tail(&mac_node->node, &tmp_del_list);
7873 break;
7874 case HCLGE_MAC_TO_ADD:
7875 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7876 if (!new_node)
7877 goto stop_traverse;
7878 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7879 new_node->state = mac_node->state;
7880 list_add_tail(&new_node->node, &tmp_add_list);
7881 break;
7882 default:
7883 break;
7884 }
7885 }
7886
7887 stop_traverse:
7888 spin_unlock_bh(&vport->mac_list_lock);
7889
7890 /* delete first, in order to get max mac table space for adding */
7891 if (mac_type == HCLGE_MAC_ADDR_UC) {
7892 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7893 hclge_rm_uc_addr_common);
7894 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7895 hclge_add_uc_addr_common);
7896 } else {
7897 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7898 hclge_rm_mc_addr_common);
7899 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7900 hclge_add_mc_addr_common);
7901 }
7902
7903 /* if some mac addresses failed to be added/deleted, move them back
7904 * to the mac_list and retry next time.
7905 */
7906 spin_lock_bh(&vport->mac_list_lock);
7907
7908 hclge_sync_from_del_list(&tmp_del_list, list);
7909 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7910
7911 spin_unlock_bh(&vport->mac_list_lock);
7912
7913 hclge_update_overflow_flags(vport, mac_type, all_added);
7914 }
7915
7916 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7917 {
7918 struct hclge_dev *hdev = vport->back;
7919
7920 if (test_bit(vport->vport_id, hdev->vport_config_block))
7921 return false;
7922
7923 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7924 return true;
7925
7926 return false;
7927 }
7928
7929 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7930 {
7931 int i;
7932
7933 for (i = 0; i < hdev->num_alloc_vport; i++) {
7934 struct hclge_vport *vport = &hdev->vport[i];
7935
7936 if (!hclge_need_sync_mac_table(vport))
7937 continue;
7938
7939 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7940 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7941 }
7942 }
7943
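/* Remove all uc or mc addresses of @vport from hardware. When is_del_list
 * is true the entries are dropped from the software list as well;
 * otherwise the vport is marked in vport_config_block and ACTIVE entries
 * are kept as TO_ADD so they can be restored after a VF reset.
 */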
7944 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7945 enum HCLGE_MAC_ADDR_TYPE mac_type)
7946 {
7947 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7948 struct hclge_mac_node *mac_cfg, *tmp;
7949 struct hclge_dev *hdev = vport->back;
7950 struct list_head tmp_del_list, *list;
7951 int ret;
7952
7953 if (mac_type == HCLGE_MAC_ADDR_UC) {
7954 list = &vport->uc_mac_list;
7955 unsync = hclge_rm_uc_addr_common;
7956 } else {
7957 list = &vport->mc_mac_list;
7958 unsync = hclge_rm_mc_addr_common;
7959 }
7960
7961 INIT_LIST_HEAD(&tmp_del_list);
7962
7963 if (!is_del_list)
7964 set_bit(vport->vport_id, hdev->vport_config_block);
7965
7966 spin_lock_bh(&vport->mac_list_lock);
7967
7968 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7969 switch (mac_cfg->state) {
7970 case HCLGE_MAC_TO_DEL:
7971 case HCLGE_MAC_ACTIVE:
7972 list_del(&mac_cfg->node);
7973 list_add_tail(&mac_cfg->node, &tmp_del_list);
7974 break;
7975 case HCLGE_MAC_TO_ADD:
7976 if (is_del_list) {
7977 list_del(&mac_cfg->node);
7978 kfree(mac_cfg);
7979 }
7980 break;
7981 }
7982 }
7983
7984 spin_unlock_bh(&vport->mac_list_lock);
7985
7986 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7987 ret = unsync(vport, mac_cfg->mac_addr);
7988 if (!ret || ret == -ENOENT) {
7989 /* clear all mac addrs from hardware, but keep these
7990 * mac addrs in the mac list, and restore them after
7991 * the vf reset finishes.
7992 */
7993 if (!is_del_list &&
7994 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7995 mac_cfg->state = HCLGE_MAC_TO_ADD;
7996 } else {
7997 list_del(&mac_cfg->node);
7998 kfree(mac_cfg);
7999 }
8000 } else if (is_del_list) {
8001 mac_cfg->state = HCLGE_MAC_TO_DEL;
8002 }
8003 }
8004
8005 spin_lock_bh(&vport->mac_list_lock);
8006
8007 hclge_sync_from_del_list(&tmp_del_list, list);
8008
8009 spin_unlock_bh(&vport->mac_list_lock);
8010 }
8011
8012 /* remove all mac addresses when uninitializing */
8013 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8014 enum HCLGE_MAC_ADDR_TYPE mac_type)
8015 {
8016 struct hclge_mac_node *mac_node, *tmp;
8017 struct hclge_dev *hdev = vport->back;
8018 struct list_head tmp_del_list, *list;
8019
8020 INIT_LIST_HEAD(&tmp_del_list);
8021
8022 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8023 &vport->uc_mac_list : &vport->mc_mac_list;
8024
8025 spin_lock_bh(&vport->mac_list_lock);
8026
8027 list_for_each_entry_safe(mac_node, tmp, list, node) {
8028 switch (mac_node->state) {
8029 case HCLGE_MAC_TO_DEL:
8030 case HCLGE_MAC_ACTIVE:
8031 list_del(&mac_node->node);
8032 list_add_tail(&mac_node->node, &tmp_del_list);
8033 break;
8034 case HCLGE_MAC_TO_ADD:
8035 list_del(&mac_node->node);
8036 kfree(mac_node);
8037 break;
8038 }
8039 }
8040
8041 spin_unlock_bh(&vport->mac_list_lock);
8042
8043 if (mac_type == HCLGE_MAC_ADDR_UC)
8044 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8045 hclge_rm_uc_addr_common);
8046 else
8047 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8048 hclge_rm_mc_addr_common);
8049
8050 if (!list_empty(&tmp_del_list))
8051 dev_warn(&hdev->pdev->dev,
8052 "uninit %s mac list for vport %u not completely.\n",
8053 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8054 vport->vport_id);
8055
8056 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8057 list_del(&mac_node->node);
8058 kfree(mac_node);
8059 }
8060 }
8061
8062 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8063 {
8064 struct hclge_vport *vport;
8065 int i;
8066
8067 for (i = 0; i < hdev->num_alloc_vport; i++) {
8068 vport = &hdev->vport[i];
8069 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8070 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8071 }
8072 }
8073
8074 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8075 u16 cmdq_resp, u8 resp_code)
8076 {
8077 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8078 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8079 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8080 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8081
8082 int return_status;
8083
8084 if (cmdq_resp) {
8085 dev_err(&hdev->pdev->dev,
8086 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8087 cmdq_resp);
8088 return -EIO;
8089 }
8090
8091 switch (resp_code) {
8092 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8093 case HCLGE_ETHERTYPE_ALREADY_ADD:
8094 return_status = 0;
8095 break;
8096 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8097 dev_err(&hdev->pdev->dev,
8098 "add mac ethertype failed for manager table overflow.\n");
8099 return_status = -EIO;
8100 break;
8101 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8102 dev_err(&hdev->pdev->dev,
8103 "add mac ethertype failed for key conflict.\n");
8104 return_status = -EIO;
8105 break;
8106 default:
8107 dev_err(&hdev->pdev->dev,
8108 "add mac ethertype failed for undefined, code=%u.\n",
8109 resp_code);
8110 return_status = -EIO;
8111 }
8112
8113 return return_status;
8114 }
8115
8116 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8117 u8 *mac_addr)
8118 {
8119 struct hclge_mac_vlan_tbl_entry_cmd req;
8120 struct hclge_dev *hdev = vport->back;
8121 struct hclge_desc desc;
8122 u16 egress_port = 0;
8123 int i;
8124
8125 if (is_zero_ether_addr(mac_addr))
8126 return false;
8127
8128 memset(&req, 0, sizeof(req));
8129 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8130 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8131 req.egress_port = cpu_to_le16(egress_port);
8132 hclge_prepare_mac_addr(&req, mac_addr, false);
8133
8134 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8135 return true;
8136
8137 vf_idx += HCLGE_VF_VPORT_START_NUM;
8138 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8139 if (i != vf_idx &&
8140 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8141 return true;
8142
8143 return false;
8144 }
8145
8146 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8147 u8 *mac_addr)
8148 {
8149 struct hclge_vport *vport = hclge_get_vport(handle);
8150 struct hclge_dev *hdev = vport->back;
8151
8152 vport = hclge_get_vf_vport(hdev, vf);
8153 if (!vport)
8154 return -EINVAL;
8155
8156 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8157 dev_info(&hdev->pdev->dev,
8158 "Specified MAC(=%pM) is same as before, no change committed!\n",
8159 mac_addr);
8160 return 0;
8161 }
8162
8163 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8164 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8165 mac_addr);
8166 return -EEXIST;
8167 }
8168
8169 ether_addr_copy(vport->vf_info.mac, mac_addr);
8170
8171 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8172 dev_info(&hdev->pdev->dev,
8173 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8174 vf, mac_addr);
8175 return hclge_inform_reset_assert_to_vf(vport);
8176 }
8177
8178 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8179 vf, mac_addr);
8180 return 0;
8181 }
8182
8183 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8184 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8185 {
8186 struct hclge_desc desc;
8187 u8 resp_code;
8188 u16 retval;
8189 int ret;
8190
8191 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8192 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8193
8194 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8195 if (ret) {
8196 dev_err(&hdev->pdev->dev,
8197 "add mac ethertype failed for cmd_send, ret =%d.\n",
8198 ret);
8199 return ret;
8200 }
8201
8202 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8203 retval = le16_to_cpu(desc.retval);
8204
8205 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8206 }
8207
8208 static int init_mgr_tbl(struct hclge_dev *hdev)
8209 {
8210 int ret;
8211 int i;
8212
8213 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8214 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8215 if (ret) {
8216 dev_err(&hdev->pdev->dev,
8217 "add mac ethertype failed, ret =%d.\n",
8218 ret);
8219 return ret;
8220 }
8221 }
8222
8223 return 0;
8224 }
8225
8226 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8227 {
8228 struct hclge_vport *vport = hclge_get_vport(handle);
8229 struct hclge_dev *hdev = vport->back;
8230
8231 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8232 }
8233
8234 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8235 const u8 *old_addr, const u8 *new_addr)
8236 {
8237 struct list_head *list = &vport->uc_mac_list;
8238 struct hclge_mac_node *old_node, *new_node;
8239
8240 new_node = hclge_find_mac_node(list, new_addr);
8241 if (!new_node) {
8242 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8243 if (!new_node)
8244 return -ENOMEM;
8245
8246 new_node->state = HCLGE_MAC_TO_ADD;
8247 ether_addr_copy(new_node->mac_addr, new_addr);
8248 list_add(&new_node->node, list);
8249 } else {
8250 if (new_node->state == HCLGE_MAC_TO_DEL)
8251 new_node->state = HCLGE_MAC_ACTIVE;
8252
8253 /* make sure the new addr is at the list head; otherwise the dev
8254 * addr may not be re-added into the mac table due to the umv space
8255 * limitation after a global/imp reset, which clears the mac
8256 * table in hardware.
8257 */
8258 list_move(&new_node->node, list);
8259 }
8260
8261 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8262 old_node = hclge_find_mac_node(list, old_addr);
8263 if (old_node) {
8264 if (old_node->state == HCLGE_MAC_TO_ADD) {
8265 list_del(&old_node->node);
8266 kfree(old_node);
8267 } else {
8268 old_node->state = HCLGE_MAC_TO_DEL;
8269 }
8270 }
8271 }
8272
8273 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8274
8275 return 0;
8276 }
8277
8278 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8279 bool is_first)
8280 {
8281 const unsigned char *new_addr = (const unsigned char *)p;
8282 struct hclge_vport *vport = hclge_get_vport(handle);
8283 struct hclge_dev *hdev = vport->back;
8284 unsigned char *old_addr = NULL;
8285 int ret;
8286
8287 /* mac addr check */
8288 if (is_zero_ether_addr(new_addr) ||
8289 is_broadcast_ether_addr(new_addr) ||
8290 is_multicast_ether_addr(new_addr)) {
8291 dev_err(&hdev->pdev->dev,
8292 "change uc mac err! invalid mac: %pM.\n",
8293 new_addr);
8294 return -EINVAL;
8295 }
8296
8297 ret = hclge_pause_addr_cfg(hdev, new_addr);
8298 if (ret) {
8299 dev_err(&hdev->pdev->dev,
8300 "failed to configure mac pause address, ret = %d\n",
8301 ret);
8302 return ret;
8303 }
8304
8305 if (!is_first)
8306 old_addr = hdev->hw.mac.mac_addr;
8307
8308 spin_lock_bh(&vport->mac_list_lock);
8309 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8310 if (ret) {
8311 dev_err(&hdev->pdev->dev,
8312 "failed to change the mac addr:%pM, ret = %d\n",
8313 new_addr, ret);
8314 spin_unlock_bh(&vport->mac_list_lock);
8315
8316 if (!is_first)
8317 hclge_pause_addr_cfg(hdev, old_addr);
8318
8319 return ret;
8320 }
8321 /* we must update the dev addr under the spin lock, preventing the dev
8322 * addr from being removed by the set_rx_mode path.
8323 */
8324 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8325 spin_unlock_bh(&vport->mac_list_lock);
8326
8327 hclge_task_schedule(hdev, 0);
8328
8329 return 0;
8330 }
8331
8332 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8333 int cmd)
8334 {
8335 struct hclge_vport *vport = hclge_get_vport(handle);
8336 struct hclge_dev *hdev = vport->back;
8337
8338 if (!hdev->hw.mac.phydev)
8339 return -EOPNOTSUPP;
8340
8341 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8342 }
8343
8344 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8345 u8 fe_type, bool filter_en, u8 vf_id)
8346 {
8347 struct hclge_vlan_filter_ctrl_cmd *req;
8348 struct hclge_desc desc;
8349 int ret;
8350
8351 /* read current vlan filter parameter */
8352 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8353 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8354 req->vlan_type = vlan_type;
8355 req->vf_id = vf_id;
8356
8357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8358 if (ret) {
8359 dev_err(&hdev->pdev->dev,
8360 "failed to get vlan filter config, ret = %d.\n", ret);
8361 return ret;
8362 }
8363
8364 /* modify and write new config parameter */
8365 hclge_cmd_reuse_desc(&desc, false);
8366 req->vlan_fe = filter_en ?
8367 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8368
8369 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8370 if (ret)
8371 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8372 ret);
8373
8374 return ret;
8375 }
8376
8377 #define HCLGE_FILTER_TYPE_VF 0
8378 #define HCLGE_FILTER_TYPE_PORT 1
8379 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8380 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8381 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8382 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8383 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8384 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8385 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8386 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8387 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8388
8389 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8390 {
8391 struct hclge_vport *vport = hclge_get_vport(handle);
8392 struct hclge_dev *hdev = vport->back;
8393
8394 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8395 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8396 HCLGE_FILTER_FE_EGRESS, enable, 0);
8397 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8398 HCLGE_FILTER_FE_INGRESS, enable, 0);
8399 } else {
8400 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8401 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8402 0);
8403 }
8404 if (enable)
8405 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8406 else
8407 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8408 }
8409
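/* Add or kill @vlan in the per-VF VLAN filter of @vfid. The target
 * function is selected through a bitmap split across two descriptors:
 * byte vfid / 8, bit vfid % 8, with bytes beyond HCLGE_MAX_VF_BYTES
 * carried by the second descriptor. For example, vfid 10 sets bit 2 of
 * byte 1 (value 0x04) in desc[0].
 */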
8410 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8411 bool is_kill, u16 vlan,
8412 __be16 proto)
8413 {
8414 struct hclge_vport *vport = &hdev->vport[vfid];
8415 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8416 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8417 struct hclge_desc desc[2];
8418 u8 vf_byte_val;
8419 u8 vf_byte_off;
8420 int ret;
8421
8422 /* if the vf vlan table is full, firmware will disable the vf vlan
8423 * filter, so it is neither possible nor necessary to add a new vlan id.
8424 * If spoof check is enabled and the vf vlan table is full, no new vlan
8425 * should be added, since tx packets with that vlan id would be dropped.
8426 */
8427 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8428 if (vport->vf_info.spoofchk && vlan) {
8429 dev_err(&hdev->pdev->dev,
8430 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8431 return -EPERM;
8432 }
8433 return 0;
8434 }
8435
8436 hclge_cmd_setup_basic_desc(&desc[0],
8437 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8438 hclge_cmd_setup_basic_desc(&desc[1],
8439 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8440
8441 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8442
8443 vf_byte_off = vfid / 8;
8444 vf_byte_val = 1 << (vfid % 8);
8445
8446 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8447 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8448
8449 req0->vlan_id = cpu_to_le16(vlan);
8450 req0->vlan_cfg = is_kill;
8451
8452 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8453 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8454 else
8455 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8456
8457 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8458 if (ret) {
8459 dev_err(&hdev->pdev->dev,
8460 "Send vf vlan command fail, ret =%d.\n",
8461 ret);
8462 return ret;
8463 }
8464
8465 if (!is_kill) {
8466 #define HCLGE_VF_VLAN_NO_ENTRY 2
8467 if (!req0->resp_code || req0->resp_code == 1)
8468 return 0;
8469
8470 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8471 set_bit(vfid, hdev->vf_vlan_full);
8472 dev_warn(&hdev->pdev->dev,
8473 "vf vlan table is full, vf vlan filter is disabled\n");
8474 return 0;
8475 }
8476
8477 dev_err(&hdev->pdev->dev,
8478 "Add vf vlan filter fail, ret =%u.\n",
8479 req0->resp_code);
8480 } else {
8481 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8482 if (!req0->resp_code)
8483 return 0;
8484
8485 /* the vf vlan filter is disabled when the vf vlan table is full,
8486 * so new vlan ids were never added into the vf vlan table.
8487 * Just return 0 without a warning, to avoid massive verbose
8488 * logs on unload.
8489 */
8490 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8491 return 0;
8492
8493 dev_err(&hdev->pdev->dev,
8494 "Kill vf vlan filter fail, ret =%u.\n",
8495 req0->resp_code);
8496 }
8497
8498 return -EIO;
8499 }
8500
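/* Program the port-level VLAN filter for a single vlan_id. The id is
 * decomposed into a block offset (vlan_id / HCLGE_VLAN_ID_OFFSET_STEP),
 * a byte inside that block and a bit inside that byte, which together
 * select one bit in the firmware's VLAN bitmap.
 */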
8501 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8502 u16 vlan_id, bool is_kill)
8503 {
8504 struct hclge_vlan_filter_pf_cfg_cmd *req;
8505 struct hclge_desc desc;
8506 u8 vlan_offset_byte_val;
8507 u8 vlan_offset_byte;
8508 u8 vlan_offset_160;
8509 int ret;
8510
8511 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8512
8513 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8514 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8515 HCLGE_VLAN_BYTE_SIZE;
8516 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8517
8518 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8519 req->vlan_offset = vlan_offset_160;
8520 req->vlan_cfg = is_kill;
8521 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8522
8523 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8524 if (ret)
8525 dev_err(&hdev->pdev->dev,
8526 "port vlan command, send fail, ret =%d.\n", ret);
8527 return ret;
8528 }
8529
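/* Apply a VLAN filter change for @vport_id in hardware. The per-VF filter
 * is always updated (except when killing vlan 0), while the shared port
 * filter is only touched at the edges: it is added when the first vport
 * joins the vlan (vport_num == 1) and removed when the last vport leaves
 * it (vport_num == 0), as tracked in hdev->vlan_table.
 */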
8530 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8531 u16 vport_id, u16 vlan_id,
8532 bool is_kill)
8533 {
8534 u16 vport_idx, vport_num = 0;
8535 int ret;
8536
8537 if (is_kill && !vlan_id)
8538 return 0;
8539
8540 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8541 proto);
8542 if (ret) {
8543 dev_err(&hdev->pdev->dev,
8544 "Set %u vport vlan filter config fail, ret =%d.\n",
8545 vport_id, ret);
8546 return ret;
8547 }
8548
8549 /* vlan 0 may be added twice when 8021q module is enabled */
8550 if (!is_kill && !vlan_id &&
8551 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8552 return 0;
8553
8554 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8555 dev_err(&hdev->pdev->dev,
8556 "Add port vlan failed, vport %u is already in vlan %u\n",
8557 vport_id, vlan_id);
8558 return -EINVAL;
8559 }
8560
8561 if (is_kill &&
8562 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8563 dev_err(&hdev->pdev->dev,
8564 "Delete port vlan failed, vport %u is not in vlan %u\n",
8565 vport_id, vlan_id);
8566 return -EINVAL;
8567 }
8568
8569 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8570 vport_num++;
8571
8572 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8573 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8574 is_kill);
8575
8576 return ret;
8577 }
8578
8579 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8580 {
8581 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8582 struct hclge_vport_vtag_tx_cfg_cmd *req;
8583 struct hclge_dev *hdev = vport->back;
8584 struct hclge_desc desc;
8585 u16 bmap_index;
8586 int status;
8587
8588 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8589
8590 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8591 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8592 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8593 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8594 vcfg->accept_tag1 ? 1 : 0);
8595 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8596 vcfg->accept_untag1 ? 1 : 0);
8597 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8598 vcfg->accept_tag2 ? 1 : 0);
8599 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8600 vcfg->accept_untag2 ? 1 : 0);
8601 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8602 vcfg->insert_tag1_en ? 1 : 0);
8603 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8604 vcfg->insert_tag2_en ? 1 : 0);
8605 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8606
8607 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8608 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8609 HCLGE_VF_NUM_PER_BYTE;
8610 req->vf_bitmap[bmap_index] =
8611 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8612
8613 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8614 if (status)
8615 dev_err(&hdev->pdev->dev,
8616 "Send port txvlan cfg command fail, ret =%d\n",
8617 status);
8618
8619 return status;
8620 }
8621
8622 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8623 {
8624 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8625 struct hclge_vport_vtag_rx_cfg_cmd *req;
8626 struct hclge_dev *hdev = vport->back;
8627 struct hclge_desc desc;
8628 u16 bmap_index;
8629 int status;
8630
8631 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8632
8633 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8634 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8635 vcfg->strip_tag1_en ? 1 : 0);
8636 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8637 vcfg->strip_tag2_en ? 1 : 0);
8638 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8639 vcfg->vlan1_vlan_prionly ? 1 : 0);
8640 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8641 vcfg->vlan2_vlan_prionly ? 1 : 0);
8642
8643 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8644 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8645 HCLGE_VF_NUM_PER_BYTE;
8646 req->vf_bitmap[bmap_index] =
8647 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8648
8649 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8650 if (status)
8651 dev_err(&hdev->pdev->dev,
8652 "Send port rxvlan cfg command fail, ret =%d\n",
8653 status);
8654
8655 return status;
8656 }
8657
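/* Configure TX tag insertion and RX tag stripping for a vport according
 * to its port-based VLAN state: with port-based VLAN disabled, tag1 from
 * the stack is accepted as-is and tag2 stripping follows the rx offload
 * setting; with it enabled, tag1 is inserted from @vlan_tag on TX and
 * tag1 stripping follows the rx offload setting while tag2 is always
 * stripped.
 */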
8658 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8659 u16 port_base_vlan_state,
8660 u16 vlan_tag)
8661 {
8662 int ret;
8663
8664 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8665 vport->txvlan_cfg.accept_tag1 = true;
8666 vport->txvlan_cfg.insert_tag1_en = false;
8667 vport->txvlan_cfg.default_tag1 = 0;
8668 } else {
8669 vport->txvlan_cfg.accept_tag1 = false;
8670 vport->txvlan_cfg.insert_tag1_en = true;
8671 vport->txvlan_cfg.default_tag1 = vlan_tag;
8672 }
8673
8674 vport->txvlan_cfg.accept_untag1 = true;
8675
8676 /* accept_tag2 and accept_untag2 are not supported on
8677 * pdev revision (0x20); newer revisions support them, but
8678 * these two fields cannot be configured by the user.
8679 */
8680 vport->txvlan_cfg.accept_tag2 = true;
8681 vport->txvlan_cfg.accept_untag2 = true;
8682 vport->txvlan_cfg.insert_tag2_en = false;
8683 vport->txvlan_cfg.default_tag2 = 0;
8684
8685 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8686 vport->rxvlan_cfg.strip_tag1_en = false;
8687 vport->rxvlan_cfg.strip_tag2_en =
8688 vport->rxvlan_cfg.rx_vlan_offload_en;
8689 } else {
8690 vport->rxvlan_cfg.strip_tag1_en =
8691 vport->rxvlan_cfg.rx_vlan_offload_en;
8692 vport->rxvlan_cfg.strip_tag2_en = true;
8693 }
8694 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8695 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8696
8697 ret = hclge_set_vlan_tx_offload_cfg(vport);
8698 if (ret)
8699 return ret;
8700
8701 return hclge_set_vlan_rx_offload_cfg(vport);
8702 }
8703
8704 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8705 {
8706 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8707 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8708 struct hclge_desc desc;
8709 int status;
8710
8711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8712 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8713 rx_req->ot_fst_vlan_type =
8714 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8715 rx_req->ot_sec_vlan_type =
8716 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8717 rx_req->in_fst_vlan_type =
8718 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8719 rx_req->in_sec_vlan_type =
8720 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8721
8722 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8723 if (status) {
8724 dev_err(&hdev->pdev->dev,
8725 "Send rxvlan protocol type command fail, ret =%d\n",
8726 status);
8727 return status;
8728 }
8729
8730 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8731
8732 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8733 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8734 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8735
8736 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8737 if (status)
8738 dev_err(&hdev->pdev->dev,
8739 "Send txvlan protocol type command fail, ret =%d\n",
8740 status);
8741
8742 return status;
8743 }
8744
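/* Set up the default VLAN configuration for the PF and all vports:
 * enable the VLAN filters (per-function egress filter on V2+ devices,
 * plus the port ingress filter), program the default 802.1Q protocol
 * types, apply each vport's port based VLAN offload setting, and add
 * VLAN 0 to the filter table.
 */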
8745 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8746 {
8747 #define HCLGE_DEF_VLAN_TYPE 0x8100
8748
8749 struct hnae3_handle *handle = &hdev->vport[0].nic;
8750 struct hclge_vport *vport;
8751 int ret;
8752 int i;
8753
8754 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8755 /* for revision 0x21, vf vlan filter is per function */
8756 for (i = 0; i < hdev->num_alloc_vport; i++) {
8757 vport = &hdev->vport[i];
8758 ret = hclge_set_vlan_filter_ctrl(hdev,
8759 HCLGE_FILTER_TYPE_VF,
8760 HCLGE_FILTER_FE_EGRESS,
8761 true,
8762 vport->vport_id);
8763 if (ret)
8764 return ret;
8765 }
8766
8767 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8768 HCLGE_FILTER_FE_INGRESS, true,
8769 0);
8770 if (ret)
8771 return ret;
8772 } else {
8773 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8774 HCLGE_FILTER_FE_EGRESS_V1_B,
8775 true, 0);
8776 if (ret)
8777 return ret;
8778 }
8779
8780 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8781
8782 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8783 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8784 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8785 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8786 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8787 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8788
8789 ret = hclge_set_vlan_protocol_type(hdev);
8790 if (ret)
8791 return ret;
8792
8793 for (i = 0; i < hdev->num_alloc_vport; i++) {
8794 u16 vlan_tag;
8795
8796 vport = &hdev->vport[i];
8797 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8798
8799 ret = hclge_vlan_offload_cfg(vport,
8800 vport->port_base_vlan_cfg.state,
8801 vlan_tag);
8802 if (ret)
8803 return ret;
8804 }
8805
8806 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8807 }
8808
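/* Track a VLAN ID in the vport's software VLAN list. hd_tbl_status
 * records whether the entry has also been written to the hardware
 * VLAN filter table, so it can be restored or removed later.
 */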
8809 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8810 bool writen_to_tbl)
8811 {
8812 struct hclge_vport_vlan_cfg *vlan, *tmp;
8813
8814 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8815 if (vlan->vlan_id == vlan_id)
8816 return;
8817
8818 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8819 if (!vlan)
8820 return;
8821
8822 vlan->hd_tbl_status = writen_to_tbl;
8823 vlan->vlan_id = vlan_id;
8824
8825 list_add_tail(&vlan->node, &vport->vlan_list);
8826 }
8827
8828 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8829 {
8830 struct hclge_vport_vlan_cfg *vlan, *tmp;
8831 struct hclge_dev *hdev = vport->back;
8832 int ret;
8833
8834 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8835 if (!vlan->hd_tbl_status) {
8836 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8837 vport->vport_id,
8838 vlan->vlan_id, false);
8839 if (ret) {
8840 dev_err(&hdev->pdev->dev,
8841 "restore vport vlan list failed, ret=%d\n",
8842 ret);
8843 return ret;
8844 }
8845 }
8846 vlan->hd_tbl_status = true;
8847 }
8848
8849 return 0;
8850 }
8851
8852 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8853 bool is_write_tbl)
8854 {
8855 struct hclge_vport_vlan_cfg *vlan, *tmp;
8856 struct hclge_dev *hdev = vport->back;
8857
8858 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8859 if (vlan->vlan_id == vlan_id) {
8860 if (is_write_tbl && vlan->hd_tbl_status)
8861 hclge_set_vlan_filter_hw(hdev,
8862 htons(ETH_P_8021Q),
8863 vport->vport_id,
8864 vlan_id,
8865 true);
8866
8867 list_del(&vlan->node);
8868 kfree(vlan);
8869 break;
8870 }
8871 }
8872 }
8873
8874 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8875 {
8876 struct hclge_vport_vlan_cfg *vlan, *tmp;
8877 struct hclge_dev *hdev = vport->back;
8878
8879 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8880 if (vlan->hd_tbl_status)
8881 hclge_set_vlan_filter_hw(hdev,
8882 htons(ETH_P_8021Q),
8883 vport->vport_id,
8884 vlan->vlan_id,
8885 true);
8886
8887 vlan->hd_tbl_status = false;
8888 if (is_del_list) {
8889 list_del(&vlan->node);
8890 kfree(vlan);
8891 }
8892 }
8893 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8894 }
8895
8896 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8897 {
8898 struct hclge_vport_vlan_cfg *vlan, *tmp;
8899 struct hclge_vport *vport;
8900 int i;
8901
8902 for (i = 0; i < hdev->num_alloc_vport; i++) {
8903 vport = &hdev->vport[i];
8904 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8905 list_del(&vlan->node);
8906 kfree(vlan);
8907 }
8908 }
8909 }
8910
8911 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8912 {
8913 struct hclge_vport_vlan_cfg *vlan, *tmp;
8914 struct hclge_dev *hdev = vport->back;
8915 u16 vlan_proto;
8916 u16 vlan_id;
8917 u16 state;
8918 int ret;
8919
8920 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8921 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8922 state = vport->port_base_vlan_cfg.state;
8923
8924 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8925 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8926 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8927 vport->vport_id, vlan_id,
8928 false);
8929 return;
8930 }
8931
8932 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8933 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8934 vport->vport_id,
8935 vlan->vlan_id, false);
8936 if (ret)
8937 break;
8938 vlan->hd_tbl_status = true;
8939 }
8940 }
8941
8942 /* For global reset and imp reset, hardware will clear the mac table,
8943 * so we change the mac address state from ACTIVE to TO_ADD, then they
8944  * can be restored in the service task after the reset completes. Furthermore,
8945  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
8946  * restored after reset, so just remove these mac nodes from mac_list.
8947 */
8948 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8949 {
8950 struct hclge_mac_node *mac_node, *tmp;
8951
8952 list_for_each_entry_safe(mac_node, tmp, list, node) {
8953 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8954 mac_node->state = HCLGE_MAC_TO_ADD;
8955 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8956 list_del(&mac_node->node);
8957 kfree(mac_node);
8958 }
8959 }
8960 }
8961
8962 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8963 {
8964 spin_lock_bh(&vport->mac_list_lock);
8965
8966 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8967 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8968 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8969
8970 spin_unlock_bh(&vport->mac_list_lock);
8971 }
8972
8973 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8974 {
8975 struct hclge_vport *vport = &hdev->vport[0];
8976 struct hnae3_handle *handle = &vport->nic;
8977
8978 hclge_restore_mac_table_common(vport);
8979 hclge_restore_vport_vlan_table(vport);
8980 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8981
8982 hclge_restore_fd_entries(handle);
8983 }
8984
8985 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8986 {
8987 struct hclge_vport *vport = hclge_get_vport(handle);
8988
8989 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8990 vport->rxvlan_cfg.strip_tag1_en = false;
8991 vport->rxvlan_cfg.strip_tag2_en = enable;
8992 } else {
8993 vport->rxvlan_cfg.strip_tag1_en = enable;
8994 vport->rxvlan_cfg.strip_tag2_en = true;
8995 }
8996 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8997 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8998 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8999
9000 return hclge_set_vlan_rx_offload_cfg(vport);
9001 }
9002
9003 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9004 u16 port_base_vlan_state,
9005 struct hclge_vlan_info *new_info,
9006 struct hclge_vlan_info *old_info)
9007 {
9008 struct hclge_dev *hdev = vport->back;
9009 int ret;
9010
9011 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9012 hclge_rm_vport_all_vlan_table(vport, false);
9013 return hclge_set_vlan_filter_hw(hdev,
9014 htons(new_info->vlan_proto),
9015 vport->vport_id,
9016 new_info->vlan_tag,
9017 false);
9018 }
9019
9020 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9021 vport->vport_id, old_info->vlan_tag,
9022 true);
9023 if (ret)
9024 return ret;
9025
9026 return hclge_add_vport_all_vlan_table(vport);
9027 }
9028
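/* Apply a new port based VLAN configuration to a vport: reprogram the
 * TX/RX tag offload first, then either replace the old hardware filter
 * entry with the new one (MODIFY), or switch between the single port
 * based entry and the per-VLAN entries kept in the vport list
 * (ENABLE/DISABLE), and finally update the cached vlan_info.
 */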
9029 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9030 struct hclge_vlan_info *vlan_info)
9031 {
9032 struct hnae3_handle *nic = &vport->nic;
9033 struct hclge_vlan_info *old_vlan_info;
9034 struct hclge_dev *hdev = vport->back;
9035 int ret;
9036
9037 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9038
9039 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9040 if (ret)
9041 return ret;
9042
9043 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9044 /* add new VLAN tag */
9045 ret = hclge_set_vlan_filter_hw(hdev,
9046 htons(vlan_info->vlan_proto),
9047 vport->vport_id,
9048 vlan_info->vlan_tag,
9049 false);
9050 if (ret)
9051 return ret;
9052
9053 /* remove old VLAN tag */
9054 ret = hclge_set_vlan_filter_hw(hdev,
9055 htons(old_vlan_info->vlan_proto),
9056 vport->vport_id,
9057 old_vlan_info->vlan_tag,
9058 true);
9059 if (ret)
9060 return ret;
9061
9062 goto update;
9063 }
9064
9065 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9066 old_vlan_info);
9067 if (ret)
9068 return ret;
9069
9070 /* update state only when disable/enable port based VLAN */
9071 vport->port_base_vlan_cfg.state = state;
9072 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9073 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9074 else
9075 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9076
9077 update:
9078 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9079 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9080 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9081
9082 return 0;
9083 }
9084
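/* Work out how a .ndo_set_vf_vlan request changes the port based VLAN
 * state: setting a non-zero VLAN while disabled enables it, setting
 * VLAN 0 while enabled disables it, setting a different non-zero VLAN
 * modifies it, and anything else is NOCHANGE.
 */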
9085 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9086 enum hnae3_port_base_vlan_state state,
9087 u16 vlan)
9088 {
9089 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9090 if (!vlan)
9091 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9092 else
9093 return HNAE3_PORT_BASE_VLAN_ENABLE;
9094 } else {
9095 if (!vlan)
9096 return HNAE3_PORT_BASE_VLAN_DISABLE;
9097 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9098 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9099 else
9100 return HNAE3_PORT_BASE_VLAN_MODIFY;
9101 }
9102 }
9103
9104 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9105 u16 vlan, u8 qos, __be16 proto)
9106 {
9107 struct hclge_vport *vport = hclge_get_vport(handle);
9108 struct hclge_dev *hdev = vport->back;
9109 struct hclge_vlan_info vlan_info;
9110 u16 state;
9111 int ret;
9112
9113 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9114 return -EOPNOTSUPP;
9115
9116 vport = hclge_get_vf_vport(hdev, vfid);
9117 if (!vport)
9118 return -EINVAL;
9119
9120 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9121 if (vlan > VLAN_N_VID - 1 || qos > 7)
9122 return -EINVAL;
9123 if (proto != htons(ETH_P_8021Q))
9124 return -EPROTONOSUPPORT;
9125
9126 state = hclge_get_port_base_vlan_state(vport,
9127 vport->port_base_vlan_cfg.state,
9128 vlan);
9129 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9130 return 0;
9131
9132 vlan_info.vlan_tag = vlan;
9133 vlan_info.qos = qos;
9134 vlan_info.vlan_proto = ntohs(proto);
9135
9136 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9137 return hclge_update_port_base_vlan_cfg(vport, state,
9138 &vlan_info);
9139 } else {
9140 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9141 vport->vport_id, state,
9142 vlan, qos,
9143 ntohs(proto));
9144 return ret;
9145 }
9146 }
9147
9148 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9149 {
9150 struct hclge_vlan_info *vlan_info;
9151 struct hclge_vport *vport;
9152 int ret;
9153 int vf;
9154
9155 	/* clear port based vlan for all vfs */
9156 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9157 vport = &hdev->vport[vf];
9158 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9159
9160 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9161 vport->vport_id,
9162 vlan_info->vlan_tag, true);
9163 if (ret)
9164 dev_err(&hdev->pdev->dev,
9165 "failed to clear vf vlan for vf%d, ret = %d\n",
9166 vf - HCLGE_VF_VPORT_START_NUM, ret);
9167 }
9168 }
9169
9170 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9171 u16 vlan_id, bool is_kill)
9172 {
9173 struct hclge_vport *vport = hclge_get_vport(handle);
9174 struct hclge_dev *hdev = vport->back;
9175 bool writen_to_tbl = false;
9176 int ret = 0;
9177
9178 	/* When the device is resetting or the reset has failed, the firmware
9179 	 * is unable to handle the mailbox. Just record the vlan id, and remove
9180 	 * it after the reset finishes.
9181 */
9182 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9183 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9184 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9185 return -EBUSY;
9186 }
9187
9188 	/* when port based vlan is enabled, we use the port based vlan as the
9189 	 * vlan filter entry. In this case, we don't update the vlan filter
9190 	 * table when the user adds a new vlan or removes an existing one, just
9191 	 * update the vport vlan list. The vlan ids in the vlan list will not be
9192 	 * written to the vlan filter table until port based vlan is disabled
9193 */
9194 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9195 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9196 vlan_id, is_kill);
9197 writen_to_tbl = true;
9198 }
9199
9200 if (!ret) {
9201 if (is_kill)
9202 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9203 else
9204 hclge_add_vport_vlan_table(vport, vlan_id,
9205 writen_to_tbl);
9206 } else if (is_kill) {
9207 		/* when removing the hw vlan filter failed, record the vlan id,
9208 		 * and try to remove it from hw later, to be consistent
9209 		 * with the stack
9210 */
9211 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9212 }
9213 return ret;
9214 }
9215
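/* Retry the VLAN deletions recorded in each vport's vlan_del_fail_bmap
 * while the device was resetting, removing at most HCLGE_MAX_SYNC_COUNT
 * entries per invocation.
 */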
9216 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9217 {
9218 #define HCLGE_MAX_SYNC_COUNT 60
9219
9220 int i, ret, sync_cnt = 0;
9221 u16 vlan_id;
9222
9223 /* start from vport 1 for PF is always alive */
9224 for (i = 0; i < hdev->num_alloc_vport; i++) {
9225 struct hclge_vport *vport = &hdev->vport[i];
9226
9227 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9228 VLAN_N_VID);
9229 while (vlan_id != VLAN_N_VID) {
9230 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9231 vport->vport_id, vlan_id,
9232 true);
9233 if (ret && ret != -EINVAL)
9234 return;
9235
9236 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9237 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9238
9239 sync_cnt++;
9240 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9241 return;
9242
9243 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9244 VLAN_N_VID);
9245 }
9246 }
9247 }
9248
9249 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9250 {
9251 struct hclge_config_max_frm_size_cmd *req;
9252 struct hclge_desc desc;
9253
9254 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9255
9256 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9257 req->max_frm_size = cpu_to_le16(new_mps);
9258 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9259
9260 return hclge_cmd_send(&hdev->hw, &desc, 1);
9261 }
9262
9263 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9264 {
9265 struct hclge_vport *vport = hclge_get_vport(handle);
9266
9267 return hclge_set_vport_mtu(vport, new_mtu);
9268 }
9269
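/* Convert the requested MTU into a max frame size (MTU + Ethernet
 * header + FCS + two VLAN tags) and validate it. For a VF vport only
 * the per-vport mps is recorded (and it must not exceed the PF's); for
 * the PF the MAC and the packet buffers are reconfigured with the link
 * temporarily taken down.
 */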
9270 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9271 {
9272 struct hclge_dev *hdev = vport->back;
9273 int i, max_frm_size, ret;
9274
9275 	/* HW supports 2 layers of vlan */
9276 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9277 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9278 max_frm_size > HCLGE_MAC_MAX_FRAME)
9279 return -EINVAL;
9280
9281 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9282 mutex_lock(&hdev->vport_lock);
9283 /* VF's mps must fit within hdev->mps */
9284 if (vport->vport_id && max_frm_size > hdev->mps) {
9285 mutex_unlock(&hdev->vport_lock);
9286 return -EINVAL;
9287 } else if (vport->vport_id) {
9288 vport->mps = max_frm_size;
9289 mutex_unlock(&hdev->vport_lock);
9290 return 0;
9291 }
9292
9293 	/* PF's mps must be no less than any VF's mps */
9294 for (i = 1; i < hdev->num_alloc_vport; i++)
9295 if (max_frm_size < hdev->vport[i].mps) {
9296 mutex_unlock(&hdev->vport_lock);
9297 return -EINVAL;
9298 }
9299
9300 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9301
9302 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9303 if (ret) {
9304 dev_err(&hdev->pdev->dev,
9305 "Change mtu fail, ret =%d\n", ret);
9306 goto out;
9307 }
9308
9309 hdev->mps = max_frm_size;
9310 vport->mps = max_frm_size;
9311
9312 ret = hclge_buffer_alloc(hdev);
9313 if (ret)
9314 dev_err(&hdev->pdev->dev,
9315 "Allocate buffer fail, ret =%d\n", ret);
9316
9317 out:
9318 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9319 mutex_unlock(&hdev->vport_lock);
9320 return ret;
9321 }
9322
9323 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9324 bool enable)
9325 {
9326 struct hclge_reset_tqp_queue_cmd *req;
9327 struct hclge_desc desc;
9328 int ret;
9329
9330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9331
9332 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9333 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9334 if (enable)
9335 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9336
9337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9338 if (ret) {
9339 dev_err(&hdev->pdev->dev,
9340 "Send tqp reset cmd error, status =%d\n", ret);
9341 return ret;
9342 }
9343
9344 return 0;
9345 }
9346
9347 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9348 {
9349 struct hclge_reset_tqp_queue_cmd *req;
9350 struct hclge_desc desc;
9351 int ret;
9352
9353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9354
9355 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9356 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9357
9358 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9359 if (ret) {
9360 dev_err(&hdev->pdev->dev,
9361 "Get reset status error, status =%d\n", ret);
9362 return ret;
9363 }
9364
9365 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9366 }
9367
9368 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9369 {
9370 struct hnae3_queue *queue;
9371 struct hclge_tqp *tqp;
9372
9373 queue = handle->kinfo.tqp[queue_id];
9374 tqp = container_of(queue, struct hclge_tqp, q);
9375
9376 return tqp->index;
9377 }
9378
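/* Reset a single TQP: disable the queue, request the reset from
 * firmware, poll the reset status for up to HCLGE_TQP_RESET_TRY_TIMES
 * iterations (about 1ms apart), then deassert the soft reset.
 */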
9379 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9380 {
9381 struct hclge_vport *vport = hclge_get_vport(handle);
9382 struct hclge_dev *hdev = vport->back;
9383 int reset_try_times = 0;
9384 int reset_status;
9385 u16 queue_gid;
9386 int ret;
9387
9388 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9389
9390 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9391 if (ret) {
9392 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9393 return ret;
9394 }
9395
9396 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9397 if (ret) {
9398 dev_err(&hdev->pdev->dev,
9399 "Send reset tqp cmd fail, ret = %d\n", ret);
9400 return ret;
9401 }
9402
9403 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9404 reset_status = hclge_get_reset_status(hdev, queue_gid);
9405 if (reset_status)
9406 break;
9407
9408 /* Wait for tqp hw reset */
9409 usleep_range(1000, 1200);
9410 }
9411
9412 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9413 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9414 return ret;
9415 }
9416
9417 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9418 if (ret)
9419 dev_err(&hdev->pdev->dev,
9420 "Deassert the soft reset fail, ret = %d\n", ret);
9421
9422 return ret;
9423 }
9424
9425 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9426 {
9427 struct hnae3_handle *handle = &vport->nic;
9428 struct hclge_dev *hdev = vport->back;
9429 int reset_try_times = 0;
9430 int reset_status;
9431 u16 queue_gid;
9432 int ret;
9433
9434 if (queue_id >= handle->kinfo.num_tqps) {
9435 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9436 queue_id);
9437 return;
9438 }
9439
9440 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9441
9442 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9443 if (ret) {
9444 dev_warn(&hdev->pdev->dev,
9445 "Send reset tqp cmd fail, ret = %d\n", ret);
9446 return;
9447 }
9448
9449 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9450 reset_status = hclge_get_reset_status(hdev, queue_gid);
9451 if (reset_status)
9452 break;
9453
9454 /* Wait for tqp hw reset */
9455 usleep_range(1000, 1200);
9456 }
9457
9458 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9459 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9460 return;
9461 }
9462
9463 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9464 if (ret)
9465 dev_warn(&hdev->pdev->dev,
9466 "Deassert the soft reset fail, ret = %d\n", ret);
9467 }
9468
9469 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9470 {
9471 struct hclge_vport *vport = hclge_get_vport(handle);
9472 struct hclge_dev *hdev = vport->back;
9473
9474 return hdev->fw_version;
9475 }
9476
9477 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9478 {
9479 struct phy_device *phydev = hdev->hw.mac.phydev;
9480
9481 if (!phydev)
9482 return;
9483
9484 phy_set_asym_pause(phydev, rx_en, tx_en);
9485 }
9486
9487 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9488 {
9489 int ret;
9490
9491 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9492 return 0;
9493
9494 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9495 if (ret)
9496 dev_err(&hdev->pdev->dev,
9497 "configure pauseparam error, ret = %d.\n", ret);
9498
9499 return ret;
9500 }
9501
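/* Resolve the pause configuration from the local PHY advertisement and
 * the link partner's pause/asym-pause bits, force pause off for half
 * duplex links, and program the result into the MAC.
 */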
9502 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9503 {
9504 struct phy_device *phydev = hdev->hw.mac.phydev;
9505 u16 remote_advertising = 0;
9506 u16 local_advertising;
9507 u32 rx_pause, tx_pause;
9508 u8 flowctl;
9509
9510 if (!phydev->link || !phydev->autoneg)
9511 return 0;
9512
9513 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9514
9515 if (phydev->pause)
9516 remote_advertising = LPA_PAUSE_CAP;
9517
9518 if (phydev->asym_pause)
9519 remote_advertising |= LPA_PAUSE_ASYM;
9520
9521 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9522 remote_advertising);
9523 tx_pause = flowctl & FLOW_CTRL_TX;
9524 rx_pause = flowctl & FLOW_CTRL_RX;
9525
9526 if (phydev->duplex == HCLGE_MAC_HALF) {
9527 tx_pause = 0;
9528 rx_pause = 0;
9529 }
9530
9531 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9532 }
9533
9534 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9535 u32 *rx_en, u32 *tx_en)
9536 {
9537 struct hclge_vport *vport = hclge_get_vport(handle);
9538 struct hclge_dev *hdev = vport->back;
9539 struct phy_device *phydev = hdev->hw.mac.phydev;
9540
9541 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9542
9543 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9544 *rx_en = 0;
9545 *tx_en = 0;
9546 return;
9547 }
9548
9549 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9550 *rx_en = 1;
9551 *tx_en = 0;
9552 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9553 *tx_en = 1;
9554 *rx_en = 0;
9555 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9556 *rx_en = 1;
9557 *tx_en = 1;
9558 } else {
9559 *rx_en = 0;
9560 *tx_en = 0;
9561 }
9562 }
9563
9564 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9565 u32 rx_en, u32 tx_en)
9566 {
9567 if (rx_en && tx_en)
9568 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9569 else if (rx_en && !tx_en)
9570 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9571 else if (!rx_en && tx_en)
9572 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9573 else
9574 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9575
9576 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9577 }
9578
9579 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9580 u32 rx_en, u32 tx_en)
9581 {
9582 struct hclge_vport *vport = hclge_get_vport(handle);
9583 struct hclge_dev *hdev = vport->back;
9584 struct phy_device *phydev = hdev->hw.mac.phydev;
9585 u32 fc_autoneg;
9586
9587 if (phydev) {
9588 fc_autoneg = hclge_get_autoneg(handle);
9589 if (auto_neg != fc_autoneg) {
9590 dev_info(&hdev->pdev->dev,
9591 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9592 return -EOPNOTSUPP;
9593 }
9594 }
9595
9596 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9597 dev_info(&hdev->pdev->dev,
9598 "Priority flow control enabled. Cannot set link flow control.\n");
9599 return -EOPNOTSUPP;
9600 }
9601
9602 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9603
9604 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9605
9606 if (!auto_neg)
9607 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9608
9609 if (phydev)
9610 return phy_start_aneg(phydev);
9611
9612 return -EOPNOTSUPP;
9613 }
9614
9615 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9616 u8 *auto_neg, u32 *speed, u8 *duplex)
9617 {
9618 struct hclge_vport *vport = hclge_get_vport(handle);
9619 struct hclge_dev *hdev = vport->back;
9620
9621 if (speed)
9622 *speed = hdev->hw.mac.speed;
9623 if (duplex)
9624 *duplex = hdev->hw.mac.duplex;
9625 if (auto_neg)
9626 *auto_neg = hdev->hw.mac.autoneg;
9627 }
9628
9629 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9630 u8 *module_type)
9631 {
9632 struct hclge_vport *vport = hclge_get_vport(handle);
9633 struct hclge_dev *hdev = vport->back;
9634
9635 	/* When the nic is down, the service task is not running and does not
9636 	 * update the port information every second. Query the port information
9637 	 * before returning the media type, to ensure the media information is correct.
9638 */
9639 hclge_update_port_info(hdev);
9640
9641 if (media_type)
9642 *media_type = hdev->hw.mac.media_type;
9643
9644 if (module_type)
9645 *module_type = hdev->hw.mac.module_type;
9646 }
9647
9648 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9649 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9650 {
9651 struct hclge_vport *vport = hclge_get_vport(handle);
9652 struct hclge_dev *hdev = vport->back;
9653 struct phy_device *phydev = hdev->hw.mac.phydev;
9654 int mdix_ctrl, mdix, is_resolved;
9655 unsigned int retval;
9656
9657 if (!phydev) {
9658 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9659 *tp_mdix = ETH_TP_MDI_INVALID;
9660 return;
9661 }
9662
9663 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9664
9665 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9666 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9667 HCLGE_PHY_MDIX_CTRL_S);
9668
9669 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9670 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9671 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9672
9673 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9674
9675 switch (mdix_ctrl) {
9676 case 0x0:
9677 *tp_mdix_ctrl = ETH_TP_MDI;
9678 break;
9679 case 0x1:
9680 *tp_mdix_ctrl = ETH_TP_MDI_X;
9681 break;
9682 case 0x3:
9683 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9684 break;
9685 default:
9686 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9687 break;
9688 }
9689
9690 if (!is_resolved)
9691 *tp_mdix = ETH_TP_MDI_INVALID;
9692 else if (mdix)
9693 *tp_mdix = ETH_TP_MDI_X;
9694 else
9695 *tp_mdix = ETH_TP_MDI;
9696 }
9697
9698 static void hclge_info_show(struct hclge_dev *hdev)
9699 {
9700 struct device *dev = &hdev->pdev->dev;
9701
9702 dev_info(dev, "PF info begin:\n");
9703
9704 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9705 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9706 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9707 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9708 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9709 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9710 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9711 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9712 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9713 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9714 dev_info(dev, "This is %s PF\n",
9715 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9716 dev_info(dev, "DCB %s\n",
9717 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9718 dev_info(dev, "MQPRIO %s\n",
9719 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9720
9721 dev_info(dev, "PF info end.\n");
9722 }
9723
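/* Initialise the NIC client instance for a vport and enable the NIC hw
 * error interrupts. If a reset starts or completes while the client is
 * initialising, the registration is rolled back and -EBUSY is returned.
 */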
9724 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9725 struct hclge_vport *vport)
9726 {
9727 struct hnae3_client *client = vport->nic.client;
9728 struct hclge_dev *hdev = ae_dev->priv;
9729 int rst_cnt = hdev->rst_stats.reset_cnt;
9730 int ret;
9731
9732 ret = client->ops->init_instance(&vport->nic);
9733 if (ret)
9734 return ret;
9735
9736 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9737 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9738 rst_cnt != hdev->rst_stats.reset_cnt) {
9739 ret = -EBUSY;
9740 goto init_nic_err;
9741 }
9742
9743 /* Enable nic hw error interrupts */
9744 ret = hclge_config_nic_hw_error(hdev, true);
9745 if (ret) {
9746 dev_err(&ae_dev->pdev->dev,
9747 "fail(%d) to enable hw error interrupts\n", ret);
9748 goto init_nic_err;
9749 }
9750
9751 hnae3_set_client_init_flag(client, ae_dev, 1);
9752
9753 if (netif_msg_drv(&hdev->vport->nic))
9754 hclge_info_show(hdev);
9755
9756 return ret;
9757
9758 init_nic_err:
9759 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9760 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9761 msleep(HCLGE_WAIT_RESET_DONE);
9762
9763 client->ops->uninit_instance(&vport->nic, 0);
9764
9765 return ret;
9766 }
9767
9768 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9769 struct hclge_vport *vport)
9770 {
9771 struct hclge_dev *hdev = ae_dev->priv;
9772 struct hnae3_client *client;
9773 int rst_cnt;
9774 int ret;
9775
9776 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9777 !hdev->nic_client)
9778 return 0;
9779
9780 client = hdev->roce_client;
9781 ret = hclge_init_roce_base_info(vport);
9782 if (ret)
9783 return ret;
9784
9785 rst_cnt = hdev->rst_stats.reset_cnt;
9786 ret = client->ops->init_instance(&vport->roce);
9787 if (ret)
9788 return ret;
9789
9790 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9791 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9792 rst_cnt != hdev->rst_stats.reset_cnt) {
9793 ret = -EBUSY;
9794 goto init_roce_err;
9795 }
9796
9797 /* Enable roce ras interrupts */
9798 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9799 if (ret) {
9800 dev_err(&ae_dev->pdev->dev,
9801 "fail(%d) to enable roce ras interrupts\n", ret);
9802 goto init_roce_err;
9803 }
9804
9805 hnae3_set_client_init_flag(client, ae_dev, 1);
9806
9807 return 0;
9808
9809 init_roce_err:
9810 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9811 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9812 msleep(HCLGE_WAIT_RESET_DONE);
9813
9814 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9815
9816 return ret;
9817 }
9818
9819 static int hclge_init_client_instance(struct hnae3_client *client,
9820 struct hnae3_ae_dev *ae_dev)
9821 {
9822 struct hclge_dev *hdev = ae_dev->priv;
9823 struct hclge_vport *vport;
9824 int i, ret;
9825
9826 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9827 vport = &hdev->vport[i];
9828
9829 switch (client->type) {
9830 case HNAE3_CLIENT_KNIC:
9831 hdev->nic_client = client;
9832 vport->nic.client = client;
9833 ret = hclge_init_nic_client_instance(ae_dev, vport);
9834 if (ret)
9835 goto clear_nic;
9836
9837 ret = hclge_init_roce_client_instance(ae_dev, vport);
9838 if (ret)
9839 goto clear_roce;
9840
9841 break;
9842 case HNAE3_CLIENT_ROCE:
9843 if (hnae3_dev_roce_supported(hdev)) {
9844 hdev->roce_client = client;
9845 vport->roce.client = client;
9846 }
9847
9848 ret = hclge_init_roce_client_instance(ae_dev, vport);
9849 if (ret)
9850 goto clear_roce;
9851
9852 break;
9853 default:
9854 return -EINVAL;
9855 }
9856 }
9857
9858 return 0;
9859
9860 clear_nic:
9861 hdev->nic_client = NULL;
9862 vport->nic.client = NULL;
9863 return ret;
9864 clear_roce:
9865 hdev->roce_client = NULL;
9866 vport->roce.client = NULL;
9867 return ret;
9868 }
9869
9870 static void hclge_uninit_client_instance(struct hnae3_client *client,
9871 struct hnae3_ae_dev *ae_dev)
9872 {
9873 struct hclge_dev *hdev = ae_dev->priv;
9874 struct hclge_vport *vport;
9875 int i;
9876
9877 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9878 vport = &hdev->vport[i];
9879 if (hdev->roce_client) {
9880 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9881 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9882 msleep(HCLGE_WAIT_RESET_DONE);
9883
9884 hdev->roce_client->ops->uninit_instance(&vport->roce,
9885 0);
9886 hdev->roce_client = NULL;
9887 vport->roce.client = NULL;
9888 }
9889 if (client->type == HNAE3_CLIENT_ROCE)
9890 return;
9891 if (hdev->nic_client && client->ops->uninit_instance) {
9892 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9893 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9894 msleep(HCLGE_WAIT_RESET_DONE);
9895
9896 client->ops->uninit_instance(&vport->nic, 0);
9897 hdev->nic_client = NULL;
9898 vport->nic.client = NULL;
9899 }
9900 }
9901 }
9902
9903 static int hclge_pci_init(struct hclge_dev *hdev)
9904 {
9905 struct pci_dev *pdev = hdev->pdev;
9906 struct hclge_hw *hw;
9907 int ret;
9908
9909 ret = pci_enable_device(pdev);
9910 if (ret) {
9911 dev_err(&pdev->dev, "failed to enable PCI device\n");
9912 return ret;
9913 }
9914
9915 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9916 if (ret) {
9917 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9918 if (ret) {
9919 dev_err(&pdev->dev,
9920 "can't set consistent PCI DMA");
9921 goto err_disable_device;
9922 }
9923 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9924 }
9925
9926 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9927 if (ret) {
9928 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9929 goto err_disable_device;
9930 }
9931
9932 pci_set_master(pdev);
9933 hw = &hdev->hw;
9934 hw->io_base = pcim_iomap(pdev, 2, 0);
9935 if (!hw->io_base) {
9936 dev_err(&pdev->dev, "Can't map configuration register space\n");
9937 ret = -ENOMEM;
9938 goto err_clr_master;
9939 }
9940
9941 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9942
9943 return 0;
9944 err_clr_master:
9945 pci_clear_master(pdev);
9946 pci_release_regions(pdev);
9947 err_disable_device:
9948 pci_disable_device(pdev);
9949
9950 return ret;
9951 }
9952
9953 static void hclge_pci_uninit(struct hclge_dev *hdev)
9954 {
9955 struct pci_dev *pdev = hdev->pdev;
9956
9957 pcim_iounmap(pdev, hdev->hw.io_base);
9958 pci_free_irq_vectors(pdev);
9959 pci_clear_master(pdev);
9960 pci_release_mem_regions(pdev);
9961 pci_disable_device(pdev);
9962 }
9963
9964 static void hclge_state_init(struct hclge_dev *hdev)
9965 {
9966 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9967 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9968 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9969 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9970 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9971 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9972 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9973 }
9974
9975 static void hclge_state_uninit(struct hclge_dev *hdev)
9976 {
9977 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9978 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9979
9980 if (hdev->reset_timer.function)
9981 del_timer_sync(&hdev->reset_timer);
9982 if (hdev->service_task.work.func)
9983 cancel_delayed_work_sync(&hdev->service_task);
9984 }
9985
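/* Prepare the function for an FLR: take the reset semaphore and run the
 * common reset prepare path, retrying while another reset is pending or
 * up to HCLGE_FLR_RETRY_CNT times on failure, then disable the misc
 * vector and the command queue until hclge_flr_done() rebuilds the
 * device.
 */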
9986 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9987 {
9988 #define HCLGE_FLR_RETRY_WAIT_MS 500
9989 #define HCLGE_FLR_RETRY_CNT 5
9990
9991 struct hclge_dev *hdev = ae_dev->priv;
9992 int retry_cnt = 0;
9993 int ret;
9994
9995 retry:
9996 down(&hdev->reset_sem);
9997 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9998 hdev->reset_type = HNAE3_FLR_RESET;
9999 ret = hclge_reset_prepare(hdev);
10000 if (ret || hdev->reset_pending) {
10001 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10002 ret);
10003 if (hdev->reset_pending ||
10004 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10005 dev_err(&hdev->pdev->dev,
10006 "reset_pending:0x%lx, retry_cnt:%d\n",
10007 hdev->reset_pending, retry_cnt);
10008 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10009 up(&hdev->reset_sem);
10010 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10011 goto retry;
10012 }
10013 }
10014
10015 	/* disable misc vector before FLR is done */
10016 hclge_enable_vector(&hdev->misc_vector, false);
10017 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10018 hdev->rst_stats.flr_rst_cnt++;
10019 }
10020
10021 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10022 {
10023 struct hclge_dev *hdev = ae_dev->priv;
10024 int ret;
10025
10026 hclge_enable_vector(&hdev->misc_vector, true);
10027
10028 ret = hclge_reset_rebuild(hdev);
10029 if (ret)
10030 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10031
10032 hdev->reset_type = HNAE3_NONE_RESET;
10033 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10034 up(&hdev->reset_sem);
10035 }
10036
10037 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10038 {
10039 u16 i;
10040
10041 for (i = 0; i < hdev->num_alloc_vport; i++) {
10042 struct hclge_vport *vport = &hdev->vport[i];
10043 int ret;
10044
10045 /* Send cmd to clear VF's FUNC_RST_ING */
10046 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10047 if (ret)
10048 dev_warn(&hdev->pdev->dev,
10049 "clear vf(%u) rst failed %d!\n",
10050 vport->vport_id, ret);
10051 }
10052 }
10053
10054 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10055 {
10056 struct hclge_desc desc;
10057 int ret;
10058
10059 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10060
10061 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10062 	/* This new command is only supported by new firmware; it will
10063 	 * fail with older firmware. The error value -EOPNOTSUPP can only be
10064 	 * returned by older firmware running this command, so to keep the
10065 	 * code backward compatible we override this value and return
10066 	 * success.
10067 */
10068 if (ret && ret != -EOPNOTSUPP) {
10069 dev_err(&hdev->pdev->dev,
10070 "failed to clear hw resource, ret = %d\n", ret);
10071 return ret;
10072 }
10073 return 0;
10074 }
10075
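/* Main PF initialisation path: bring up PCI and the command queue,
 * query device capabilities and specs, then set up MSI-X, TQPs, vports,
 * MAC/MDIO, VLAN, TM, RSS, the manager table, the flow director and the
 * hw error handling before enabling the misc vector and scheduling the
 * service task.
 */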
10076 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10077 {
10078 struct pci_dev *pdev = ae_dev->pdev;
10079 struct hclge_dev *hdev;
10080 int ret;
10081
10082 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10083 if (!hdev)
10084 return -ENOMEM;
10085
10086 hdev->pdev = pdev;
10087 hdev->ae_dev = ae_dev;
10088 hdev->reset_type = HNAE3_NONE_RESET;
10089 hdev->reset_level = HNAE3_FUNC_RESET;
10090 ae_dev->priv = hdev;
10091
10092 	/* HW supports 2 layers of vlan */
10093 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10094
10095 mutex_init(&hdev->vport_lock);
10096 spin_lock_init(&hdev->fd_rule_lock);
10097 sema_init(&hdev->reset_sem, 1);
10098
10099 ret = hclge_pci_init(hdev);
10100 if (ret)
10101 goto out;
10102
10103 /* Firmware command queue initialize */
10104 ret = hclge_cmd_queue_init(hdev);
10105 if (ret)
10106 goto err_pci_uninit;
10107
10108 /* Firmware command initialize */
10109 ret = hclge_cmd_init(hdev);
10110 if (ret)
10111 goto err_cmd_uninit;
10112
10113 ret = hclge_clear_hw_resource(hdev);
10114 if (ret)
10115 goto err_cmd_uninit;
10116
10117 ret = hclge_get_cap(hdev);
10118 if (ret)
10119 goto err_cmd_uninit;
10120
10121 ret = hclge_query_dev_specs(hdev);
10122 if (ret) {
10123 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10124 ret);
10125 goto err_cmd_uninit;
10126 }
10127
10128 ret = hclge_configure(hdev);
10129 if (ret) {
10130 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10131 goto err_cmd_uninit;
10132 }
10133
10134 ret = hclge_init_msi(hdev);
10135 if (ret) {
10136 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10137 goto err_cmd_uninit;
10138 }
10139
10140 ret = hclge_misc_irq_init(hdev);
10141 if (ret)
10142 goto err_msi_uninit;
10143
10144 ret = hclge_alloc_tqps(hdev);
10145 if (ret) {
10146 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10147 goto err_msi_irq_uninit;
10148 }
10149
10150 ret = hclge_alloc_vport(hdev);
10151 if (ret)
10152 goto err_msi_irq_uninit;
10153
10154 ret = hclge_map_tqp(hdev);
10155 if (ret)
10156 goto err_msi_irq_uninit;
10157
10158 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10159 ret = hclge_mac_mdio_config(hdev);
10160 if (ret)
10161 goto err_msi_irq_uninit;
10162 }
10163
10164 ret = hclge_init_umv_space(hdev);
10165 if (ret)
10166 goto err_mdiobus_unreg;
10167
10168 ret = hclge_mac_init(hdev);
10169 if (ret) {
10170 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10171 goto err_mdiobus_unreg;
10172 }
10173
10174 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10175 if (ret) {
10176 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10177 goto err_mdiobus_unreg;
10178 }
10179
10180 ret = hclge_config_gro(hdev, true);
10181 if (ret)
10182 goto err_mdiobus_unreg;
10183
10184 ret = hclge_init_vlan_config(hdev);
10185 if (ret) {
10186 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10187 goto err_mdiobus_unreg;
10188 }
10189
10190 ret = hclge_tm_schd_init(hdev);
10191 if (ret) {
10192 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10193 goto err_mdiobus_unreg;
10194 }
10195
10196 hclge_rss_init_cfg(hdev);
10197 ret = hclge_rss_init_hw(hdev);
10198 if (ret) {
10199 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10200 goto err_mdiobus_unreg;
10201 }
10202
10203 ret = init_mgr_tbl(hdev);
10204 if (ret) {
10205 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10206 goto err_mdiobus_unreg;
10207 }
10208
10209 ret = hclge_init_fd_config(hdev);
10210 if (ret) {
10211 dev_err(&pdev->dev,
10212 "fd table init fail, ret=%d\n", ret);
10213 goto err_mdiobus_unreg;
10214 }
10215
10216 INIT_KFIFO(hdev->mac_tnl_log);
10217
10218 hclge_dcb_ops_set(hdev);
10219
10220 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10221 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10222
10223 	/* Set up affinity after the service timer setup because add_timer_on
10224 	 * is called in the affinity notify.
10225 */
10226 hclge_misc_affinity_setup(hdev);
10227
10228 hclge_clear_all_event_cause(hdev);
10229 hclge_clear_resetting_state(hdev);
10230
10231 	/* Log and clear the hw errors that have already occurred */
10232 hclge_handle_all_hns_hw_errors(ae_dev);
10233
10234 	/* request a delayed reset for the error recovery, because an immediate
10235 	 * global reset on a PF may affect the pending initialization of other PFs
10236 */
10237 if (ae_dev->hw_err_reset_req) {
10238 enum hnae3_reset_type reset_level;
10239
10240 reset_level = hclge_get_reset_level(ae_dev,
10241 &ae_dev->hw_err_reset_req);
10242 hclge_set_def_reset_request(ae_dev, reset_level);
10243 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10244 }
10245
10246 /* Enable MISC vector(vector0) */
10247 hclge_enable_vector(&hdev->misc_vector, true);
10248
10249 hclge_state_init(hdev);
10250 hdev->last_reset_time = jiffies;
10251
10252 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10253 HCLGE_DRIVER_NAME);
10254
10255 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10256
10257 return 0;
10258
10259 err_mdiobus_unreg:
10260 if (hdev->hw.mac.phydev)
10261 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10262 err_msi_irq_uninit:
10263 hclge_misc_irq_uninit(hdev);
10264 err_msi_uninit:
10265 pci_free_irq_vectors(pdev);
10266 err_cmd_uninit:
10267 hclge_cmd_uninit(hdev);
10268 err_pci_uninit:
10269 pcim_iounmap(pdev, hdev->hw.io_base);
10270 pci_clear_master(pdev);
10271 pci_release_regions(pdev);
10272 pci_disable_device(pdev);
10273 out:
10274 mutex_destroy(&hdev->vport_lock);
10275 return ret;
10276 }
10277
10278 static void hclge_stats_clear(struct hclge_dev *hdev)
10279 {
10280 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10281 }
10282
10283 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10284 {
10285 return hclge_config_switch_param(hdev, vf, enable,
10286 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10287 }
10288
10289 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10290 {
10291 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10292 HCLGE_FILTER_FE_NIC_INGRESS_B,
10293 enable, vf);
10294 }
10295
10296 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10297 {
10298 int ret;
10299
10300 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10301 if (ret) {
10302 dev_err(&hdev->pdev->dev,
10303 "Set vf %d mac spoof check %s failed, ret=%d\n",
10304 vf, enable ? "on" : "off", ret);
10305 return ret;
10306 }
10307
10308 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10309 if (ret)
10310 dev_err(&hdev->pdev->dev,
10311 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10312 vf, enable ? "on" : "off", ret);
10313
10314 return ret;
10315 }
10316
10317 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10318 bool enable)
10319 {
10320 struct hclge_vport *vport = hclge_get_vport(handle);
10321 struct hclge_dev *hdev = vport->back;
10322 u32 new_spoofchk = enable ? 1 : 0;
10323 int ret;
10324
10325 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10326 return -EOPNOTSUPP;
10327
10328 vport = hclge_get_vf_vport(hdev, vf);
10329 if (!vport)
10330 return -EINVAL;
10331
10332 if (vport->vf_info.spoofchk == new_spoofchk)
10333 return 0;
10334
10335 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10336 dev_warn(&hdev->pdev->dev,
10337 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10338 vf);
10339 else if (enable && hclge_is_umv_space_full(vport, true))
10340 dev_warn(&hdev->pdev->dev,
10341 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10342 vf);
10343
10344 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10345 if (ret)
10346 return ret;
10347
10348 vport->vf_info.spoofchk = new_spoofchk;
10349 return 0;
10350 }
10351
10352 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10353 {
10354 struct hclge_vport *vport = hdev->vport;
10355 int ret;
10356 int i;
10357
10358 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10359 return 0;
10360
10361 /* resume the vf spoof check state after reset */
10362 for (i = 0; i < hdev->num_alloc_vport; i++) {
10363 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10364 vport->vf_info.spoofchk);
10365 if (ret)
10366 return ret;
10367
10368 vport++;
10369 }
10370
10371 return 0;
10372 }
10373
10374 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10375 {
10376 struct hclge_vport *vport = hclge_get_vport(handle);
10377 struct hclge_dev *hdev = vport->back;
10378 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10379 u32 new_trusted = enable ? 1 : 0;
10380 bool en_bc_pmc;
10381 int ret;
10382
10383 vport = hclge_get_vf_vport(hdev, vf);
10384 if (!vport)
10385 return -EINVAL;
10386
10387 if (vport->vf_info.trusted == new_trusted)
10388 return 0;
10389
10390 /* Disable promisc mode for VF if it is not trusted any more. */
10391 if (!enable && vport->vf_info.promisc_enable) {
10392 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10393 ret = hclge_set_vport_promisc_mode(vport, false, false,
10394 en_bc_pmc);
10395 if (ret)
10396 return ret;
10397 vport->vf_info.promisc_enable = 0;
10398 hclge_inform_vf_promisc_info(vport);
10399 }
10400
10401 vport->vf_info.trusted = new_trusted;
10402
10403 return 0;
10404 }
10405
10406 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10407 {
10408 int ret;
10409 int vf;
10410
10411 /* reset vf rate to default value */
10412 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10413 struct hclge_vport *vport = &hdev->vport[vf];
10414
10415 vport->vf_info.max_tx_rate = 0;
10416 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10417 if (ret)
10418 dev_err(&hdev->pdev->dev,
10419 "vf%d failed to reset to default, ret=%d\n",
10420 vf - HCLGE_VF_VPORT_START_NUM, ret);
10421 }
10422 }
10423
10424 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10425 int min_tx_rate, int max_tx_rate)
10426 {
10427 if (min_tx_rate != 0 ||
10428 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10429 dev_err(&hdev->pdev->dev,
10430 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10431 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10432 return -EINVAL;
10433 }
10434
10435 return 0;
10436 }
10437
10438 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10439 int min_tx_rate, int max_tx_rate, bool force)
10440 {
10441 struct hclge_vport *vport = hclge_get_vport(handle);
10442 struct hclge_dev *hdev = vport->back;
10443 int ret;
10444
10445 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10446 if (ret)
10447 return ret;
10448
10449 vport = hclge_get_vf_vport(hdev, vf);
10450 if (!vport)
10451 return -EINVAL;
10452
10453 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10454 return 0;
10455
10456 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10457 if (ret)
10458 return ret;
10459
10460 vport->vf_info.max_tx_rate = max_tx_rate;
10461
10462 return 0;
10463 }
10464
10465 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10466 {
10467 struct hnae3_handle *handle = &hdev->vport->nic;
10468 struct hclge_vport *vport;
10469 int ret;
10470 int vf;
10471
10472 /* resume the vf max_tx_rate after reset */
10473 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10474 vport = hclge_get_vf_vport(hdev, vf);
10475 if (!vport)
10476 return -EINVAL;
10477
10478 		/* zero means max rate; after reset, the firmware has already set
10479 		 * it to the max rate, so just continue.
10480 */
10481 if (!vport->vf_info.max_tx_rate)
10482 continue;
10483
10484 ret = hclge_set_vf_rate(handle, vf, 0,
10485 vport->vf_info.max_tx_rate, true);
10486 if (ret) {
10487 dev_err(&hdev->pdev->dev,
10488 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10489 vf, vport->vf_info.max_tx_rate, ret);
10490 return ret;
10491 }
10492 }
10493
10494 return 0;
10495 }
10496
10497 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10498 {
10499 struct hclge_vport *vport = hdev->vport;
10500 int i;
10501
10502 for (i = 0; i < hdev->num_alloc_vport; i++) {
10503 hclge_vport_stop(vport);
10504 vport++;
10505 }
10506 }
10507
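/* Re-initialise the hardware after a reset. For IMP and global resets
 * the VLAN and UMV shadow state is cleared first because the hardware
 * tables were wiped; other reset types keep them. The MAC, VLAN, TM,
 * RSS, manager table and flow director configuration is then rebuilt
 * and the hw error interrupts are re-enabled.
 */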
10508 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10509 {
10510 struct hclge_dev *hdev = ae_dev->priv;
10511 struct pci_dev *pdev = ae_dev->pdev;
10512 int ret;
10513
10514 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10515
10516 hclge_stats_clear(hdev);
10517 	/* NOTE: a pf reset does not need to clear or restore the pf and vf
10518 	 * table entries, so the tables in memory should not be cleaned here.
10519 */
10520 if (hdev->reset_type == HNAE3_IMP_RESET ||
10521 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10522 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10523 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10524 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10525 hclge_reset_umv_space(hdev);
10526 }
10527
10528 ret = hclge_cmd_init(hdev);
10529 if (ret) {
10530 dev_err(&pdev->dev, "Cmd queue init failed\n");
10531 return ret;
10532 }
10533
10534 ret = hclge_map_tqp(hdev);
10535 if (ret) {
10536 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10537 return ret;
10538 }
10539
10540 ret = hclge_mac_init(hdev);
10541 if (ret) {
10542 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10543 return ret;
10544 }
10545
10546 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10547 if (ret) {
10548 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10549 return ret;
10550 }
10551
10552 ret = hclge_config_gro(hdev, true);
10553 if (ret)
10554 return ret;
10555
10556 ret = hclge_init_vlan_config(hdev);
10557 if (ret) {
10558 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10559 return ret;
10560 }
10561
10562 ret = hclge_tm_init_hw(hdev, true);
10563 if (ret) {
10564 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10565 return ret;
10566 }
10567
10568 ret = hclge_rss_init_hw(hdev);
10569 if (ret) {
10570 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10571 return ret;
10572 }
10573
10574 ret = init_mgr_tbl(hdev);
10575 if (ret) {
10576 dev_err(&pdev->dev,
10577 "failed to reinit manager table, ret = %d\n", ret);
10578 return ret;
10579 }
10580
10581 ret = hclge_init_fd_config(hdev);
10582 if (ret) {
10583 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10584 return ret;
10585 }
10586
10587 /* Log and clear the hw errors that have already occurred */
10588 hclge_handle_all_hns_hw_errors(ae_dev);
10589
10590 /* Re-enable the hw error interrupts because
10591  * they are disabled during a global reset.
10592  */
10593 ret = hclge_config_nic_hw_error(hdev, true);
10594 if (ret) {
10595 dev_err(&pdev->dev,
10596 "fail(%d) to re-enable NIC hw error interrupts\n",
10597 ret);
10598 return ret;
10599 }
10600
10601 if (hdev->roce_client) {
10602 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10603 if (ret) {
10604 dev_err(&pdev->dev,
10605 "fail(%d) to re-enable roce ras interrupts\n",
10606 ret);
10607 return ret;
10608 }
10609 }
10610
10611 hclge_reset_vport_state(hdev);
10612 ret = hclge_reset_vport_spoofchk(hdev);
10613 if (ret)
10614 return ret;
10615
10616 ret = hclge_resume_vf_rate(hdev);
10617 if (ret)
10618 return ret;
10619
10620 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10621 HCLGE_DRIVER_NAME);
10622
10623 return 0;
10624 }
10625
10626 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10627 {
10628 struct hclge_dev *hdev = ae_dev->priv;
10629 struct hclge_mac *mac = &hdev->hw.mac;
10630
10631 hclge_reset_vf_rate(hdev);
10632 hclge_clear_vf_vlan(hdev);
10633 hclge_misc_affinity_teardown(hdev);
10634 hclge_state_uninit(hdev);
10635 hclge_uninit_mac_table(hdev);
10636
10637 if (mac->phydev)
10638 mdiobus_unregister(mac->mdio_bus);
10639
10640 /* Disable MISC vector(vector0) */
10641 hclge_enable_vector(&hdev->misc_vector, false);
10642 synchronize_irq(hdev->misc_vector.vector_irq);
10643
10644 /* Disable all hw interrupts */
10645 hclge_config_mac_tnl_int(hdev, false);
10646 hclge_config_nic_hw_error(hdev, false);
10647 hclge_config_rocee_ras_interrupt(hdev, false);
10648
10649 hclge_cmd_uninit(hdev);
10650 hclge_misc_irq_uninit(hdev);
10651 hclge_pci_uninit(hdev);
10652 mutex_destroy(&hdev->vport_lock);
10653 hclge_uninit_vport_vlan_table(hdev);
10654 ae_dev->priv = NULL;
10655 }
10656
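/* The maximum number of combined channels is limited both by the RSS size
 * supported by hardware and by the TQPs available per TC, e.g. 16 allocated
 * TQPs spread over 4 TCs allow at most 4 combined channels.
 */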
10657 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10658 {
10659 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10660 struct hclge_vport *vport = hclge_get_vport(handle);
10661 struct hclge_dev *hdev = vport->back;
10662
10663 return min_t(u32, hdev->rss_size_max,
10664 vport->alloc_tqps / kinfo->num_tc);
10665 }
10666
10667 static void hclge_get_channels(struct hnae3_handle *handle,
10668 struct ethtool_channels *ch)
10669 {
10670 ch->max_combined = hclge_get_max_channels(handle);
10671 ch->other_count = 1;
10672 ch->max_other = 1;
10673 ch->combined_count = handle->kinfo.rss_size;
10674 }
10675
10676 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10677 u16 *alloc_tqps, u16 *max_rss_size)
10678 {
10679 struct hclge_vport *vport = hclge_get_vport(handle);
10680 struct hclge_dev *hdev = vport->back;
10681
10682 *alloc_tqps = vport->alloc_tqps;
10683 *max_rss_size = hdev->rss_size_max;
10684 }
10685
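/* Change the number of channels: update the vport TQP mapping in TM, rewrite
 * the RSS TC mode for the new rss_size, and rebuild the RSS indirection table
 * unless the user has configured it explicitly (rxfh_configured).
 */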
10686 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10687 bool rxfh_configured)
10688 {
10689 struct hclge_vport *vport = hclge_get_vport(handle);
10690 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10691 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10692 struct hclge_dev *hdev = vport->back;
10693 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10694 u16 cur_rss_size = kinfo->rss_size;
10695 u16 cur_tqps = kinfo->num_tqps;
10696 u16 tc_valid[HCLGE_MAX_TC_NUM];
10697 u16 roundup_size;
10698 u32 *rss_indir;
10699 unsigned int i;
10700 int ret;
10701
10702 kinfo->req_rss_size = new_tqps_num;
10703
10704 ret = hclge_tm_vport_map_update(hdev);
10705 if (ret) {
10706 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10707 return ret;
10708 }
10709
10710 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10711 roundup_size = ilog2(roundup_size);
10712 /* Set the RSS TC mode according to the new RSS size */
10713 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10714 tc_valid[i] = 0;
10715
10716 if (!(hdev->hw_tc_map & BIT(i)))
10717 continue;
10718
10719 tc_valid[i] = 1;
10720 tc_size[i] = roundup_size;
10721 tc_offset[i] = kinfo->rss_size * i;
10722 }
10723 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10724 if (ret)
10725 return ret;
10726
10727 /* RSS indirection table has been configured by the user */
10728 if (rxfh_configured)
10729 goto out;
10730
10731 /* Reinitialize the RSS indirection table according to the new RSS size */
10732 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10733 if (!rss_indir)
10734 return -ENOMEM;
10735
10736 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10737 rss_indir[i] = i % kinfo->rss_size;
10738
10739 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10740 if (ret)
10741 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10742 ret);
10743
10744 kfree(rss_indir);
10745
10746 out:
10747 if (!ret)
10748 dev_info(&hdev->pdev->dev,
10749 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10750 cur_rss_size, kinfo->rss_size,
10751 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10752
10753 return ret;
10754 }
10755
10756 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10757 u32 *regs_num_64_bit)
10758 {
10759 struct hclge_desc desc;
10760 u32 total_num;
10761 int ret;
10762
10763 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10764 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10765 if (ret) {
10766 dev_err(&hdev->pdev->dev,
10767 "Query register number cmd failed, ret = %d.\n", ret);
10768 return ret;
10769 }
10770
10771 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10772 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10773
10774 total_num = *regs_num_32_bit + *regs_num_64_bit;
10775 if (!total_num)
10776 return -EINVAL;
10777
10778 return 0;
10779 }
10780
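/* Read the 32-bit register dump. Values are packed 8 words per BD; in the
 * first BD the two command-header words carry no register data, so only its
 * six data words are copied.
 */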
10781 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10782 void *data)
10783 {
10784 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10785 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10786
10787 struct hclge_desc *desc;
10788 u32 *reg_val = data;
10789 __le32 *desc_data;
10790 int nodata_num;
10791 int cmd_num;
10792 int i, k, n;
10793 int ret;
10794
10795 if (regs_num == 0)
10796 return 0;
10797
10798 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10799 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10800 HCLGE_32_BIT_REG_RTN_DATANUM);
10801 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10802 if (!desc)
10803 return -ENOMEM;
10804
10805 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10806 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10807 if (ret) {
10808 dev_err(&hdev->pdev->dev,
10809 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10810 kfree(desc);
10811 return ret;
10812 }
10813
10814 for (i = 0; i < cmd_num; i++) {
10815 if (i == 0) {
10816 desc_data = (__le32 *)(&desc[i].data[0]);
10817 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10818 } else {
10819 desc_data = (__le32 *)(&desc[i]);
10820 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10821 }
10822 for (k = 0; k < n; k++) {
10823 *reg_val++ = le32_to_cpu(*desc_data++);
10824
10825 regs_num--;
10826 if (!regs_num)
10827 break;
10828 }
10829 }
10830
10831 kfree(desc);
10832 return 0;
10833 }
10834
10835 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10836 void *data)
10837 {
10838 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10839 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10840
10841 struct hclge_desc *desc;
10842 u64 *reg_val = data;
10843 __le64 *desc_data;
10844 int nodata_len;
10845 int cmd_num;
10846 int i, k, n;
10847 int ret;
10848
10849 if (regs_num == 0)
10850 return 0;
10851
10852 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10853 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10854 HCLGE_64_BIT_REG_RTN_DATANUM);
10855 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10856 if (!desc)
10857 return -ENOMEM;
10858
10859 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10860 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10861 if (ret) {
10862 dev_err(&hdev->pdev->dev,
10863 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10864 kfree(desc);
10865 return ret;
10866 }
10867
10868 for (i = 0; i < cmd_num; i++) {
10869 if (i == 0) {
10870 desc_data = (__le64 *)(&desc[i].data[0]);
10871 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10872 } else {
10873 desc_data = (__le64 *)(&desc[i]);
10874 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10875 }
10876 for (k = 0; k < n; k++) {
10877 *reg_val++ = le64_to_cpu(*desc_data++);
10878
10879 regs_num--;
10880 if (!regs_num)
10881 break;
10882 }
10883 }
10884
10885 kfree(desc);
10886 return 0;
10887 }
10888
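/* Each block in the register dump is padded with SEPARATOR_VALUE words so it
 * ends on a REG_LEN_PER_LINE boundary; the length helpers below account for
 * one extra REG_SEPARATOR_LINE per block for this padding.
 */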
10889 #define MAX_SEPARATE_NUM 4
10890 #define SEPARATOR_VALUE 0xFDFCFBFA
10891 #define REG_NUM_PER_LINE 4
10892 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10893 #define REG_SEPARATOR_LINE 1
10894 #define REG_NUM_REMAIN_MASK 3
10895
10896 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10897 {
10898 int i;
10899
10900 /* initialize all command BDs except the last one */
10901 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10902 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10903 true);
10904 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10905 }
10906
10907 /* initialize the last command BD */
10908 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10909
10910 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10911 }
10912
10913 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10914 int *bd_num_list,
10915 u32 type_num)
10916 {
10917 u32 entries_per_desc, desc_index, index, offset, i;
10918 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10919 int ret;
10920
10921 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10922 if (ret) {
10923 dev_err(&hdev->pdev->dev,
10924 "Get dfx bd num fail, status is %d.\n", ret);
10925 return ret;
10926 }
10927
10928 entries_per_desc = ARRAY_SIZE(desc[0].data);
10929 for (i = 0; i < type_num; i++) {
10930 offset = hclge_dfx_bd_offset_list[i];
10931 index = offset % entries_per_desc;
10932 desc_index = offset / entries_per_desc;
10933 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10934 }
10935
10936 return ret;
10937 }
10938
10939 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10940 struct hclge_desc *desc_src, int bd_num,
10941 enum hclge_opcode_type cmd)
10942 {
10943 struct hclge_desc *desc = desc_src;
10944 int i, ret;
10945
10946 hclge_cmd_setup_basic_desc(desc, cmd, true);
10947 for (i = 0; i < bd_num - 1; i++) {
10948 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10949 desc++;
10950 hclge_cmd_setup_basic_desc(desc, cmd, true);
10951 }
10952
10953 desc = desc_src;
10954 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10955 if (ret)
10956 dev_err(&hdev->pdev->dev,
10957 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10958 cmd, ret);
10959
10960 return ret;
10961 }
10962
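/* Copy register values out of the DFX descriptors and pad the block with
 * separator words; returns the number of u32 words written to the buffer.
 */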
10963 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10964 void *data)
10965 {
10966 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10967 struct hclge_desc *desc = desc_src;
10968 u32 *reg = data;
10969
10970 entries_per_desc = ARRAY_SIZE(desc->data);
10971 reg_num = entries_per_desc * bd_num;
10972 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10973 for (i = 0; i < reg_num; i++) {
10974 index = i % entries_per_desc;
10975 desc_index = i / entries_per_desc;
10976 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10977 }
10978 for (i = 0; i < separator_num; i++)
10979 *reg++ = SEPARATOR_VALUE;
10980
10981 return reg_num + separator_num;
10982 }
10983
10984 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10985 {
10986 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10987 int data_len_per_desc, bd_num, i;
10988 int *bd_num_list;
10989 u32 data_len;
10990 int ret;
10991
10992 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
10993 if (!bd_num_list)
10994 return -ENOMEM;
10995
10996 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10997 if (ret) {
10998 dev_err(&hdev->pdev->dev,
10999 "Get dfx reg bd num fail, status is %d.\n", ret);
11000 goto out;
11001 }
11002
11003 data_len_per_desc = sizeof_field(struct hclge_desc, data);
11004 *len = 0;
11005 for (i = 0; i < dfx_reg_type_num; i++) {
11006 bd_num = bd_num_list[i];
11007 data_len = data_len_per_desc * bd_num;
11008 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11009 }
11010
11011 out:
11012 kfree(bd_num_list);
11013 return ret;
11014 }
11015
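/* Dump all DFX registers: query the BD count for every DFX type, allocate a
 * descriptor buffer large enough for the biggest type, then fetch each type
 * in turn and append it to the output buffer.
 */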
11016 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11017 {
11018 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11019 int bd_num, bd_num_max, buf_len, i;
11020 struct hclge_desc *desc_src;
11021 int *bd_num_list;
11022 u32 *reg = data;
11023 int ret;
11024
11025 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11026 if (!bd_num_list)
11027 return -ENOMEM;
11028
11029 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11030 if (ret) {
11031 dev_err(&hdev->pdev->dev,
11032 "Get dfx reg bd num fail, status is %d.\n", ret);
11033 goto out;
11034 }
11035
11036 bd_num_max = bd_num_list[0];
11037 for (i = 1; i < dfx_reg_type_num; i++)
11038 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11039
11040 buf_len = sizeof(*desc_src) * bd_num_max;
11041 desc_src = kzalloc(buf_len, GFP_KERNEL);
11042 if (!desc_src) {
11043 ret = -ENOMEM;
11044 goto out;
11045 }
11046
11047 for (i = 0; i < dfx_reg_type_num; i++) {
11048 bd_num = bd_num_list[i];
11049 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11050 hclge_dfx_reg_opcode_list[i]);
11051 if (ret) {
11052 dev_err(&hdev->pdev->dev,
11053 "Get dfx reg fail, status is %d.\n", ret);
11054 break;
11055 }
11056
11057 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11058 }
11059
11060 kfree(desc_src);
11061 out:
11062 kfree(bd_num_list);
11063 return ret;
11064 }
11065
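/* Dump the directly readable PF registers: the command queue and common
 * registers once, the ring registers per TQP (spaced HCLGE_RING_REG_OFFSET
 * apart) and the TQP interrupt registers per vector (spaced
 * HCLGE_RING_INT_REG_OFFSET apart); returns the number of u32 words written.
 */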
11066 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11067 struct hnae3_knic_private_info *kinfo)
11068 {
11069 #define HCLGE_RING_REG_OFFSET 0x200
11070 #define HCLGE_RING_INT_REG_OFFSET 0x4
11071
11072 int i, j, reg_num, separator_num;
11073 int data_num_sum;
11074 u32 *reg = data;
11075
11076 /* fetch per-PF register values from the PF PCIe register space */
11077 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11078 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11079 for (i = 0; i < reg_num; i++)
11080 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11081 for (i = 0; i < separator_num; i++)
11082 *reg++ = SEPARATOR_VALUE;
11083 data_num_sum = reg_num + separator_num;
11084
11085 reg_num = ARRAY_SIZE(common_reg_addr_list);
11086 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11087 for (i = 0; i < reg_num; i++)
11088 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11089 for (i = 0; i < separator_num; i++)
11090 *reg++ = SEPARATOR_VALUE;
11091 data_num_sum += reg_num + separator_num;
11092
11093 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11094 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11095 for (j = 0; j < kinfo->num_tqps; j++) {
11096 for (i = 0; i < reg_num; i++)
11097 *reg++ = hclge_read_dev(&hdev->hw,
11098 ring_reg_addr_list[i] +
11099 HCLGE_RING_REG_OFFSET * j);
11100 for (i = 0; i < separator_num; i++)
11101 *reg++ = SEPARATOR_VALUE;
11102 }
11103 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11104
11105 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11106 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11107 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11108 for (i = 0; i < reg_num; i++)
11109 *reg++ = hclge_read_dev(&hdev->hw,
11110 tqp_intr_reg_addr_list[i] +
11111 HCLGE_RING_INT_REG_OFFSET * j);
11112 for (i = 0; i < separator_num; i++)
11113 *reg++ = SEPARATOR_VALUE;
11114 }
11115 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11116
11117 return data_num_sum;
11118 }
11119
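/* Total length in bytes of the register dump returned by hclge_get_regs():
 * the PF register blocks, the 32-bit and 64-bit firmware register blocks and
 * the DFX registers, each rounded up to whole dump lines.
 */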
11120 static int hclge_get_regs_len(struct hnae3_handle *handle)
11121 {
11122 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11123 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11124 struct hclge_vport *vport = hclge_get_vport(handle);
11125 struct hclge_dev *hdev = vport->back;
11126 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11127 int regs_lines_32_bit, regs_lines_64_bit;
11128 int ret;
11129
11130 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11131 if (ret) {
11132 dev_err(&hdev->pdev->dev,
11133 "Get register number failed, ret = %d.\n", ret);
11134 return ret;
11135 }
11136
11137 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11138 if (ret) {
11139 dev_err(&hdev->pdev->dev,
11140 "Get dfx reg len failed, ret = %d.\n", ret);
11141 return ret;
11142 }
11143
11144 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11145 REG_SEPARATOR_LINE;
11146 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11147 REG_SEPARATOR_LINE;
11148 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11149 REG_SEPARATOR_LINE;
11150 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11151 REG_SEPARATOR_LINE;
11152 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11153 REG_SEPARATOR_LINE;
11154 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11155 REG_SEPARATOR_LINE;
11156
11157 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11158 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11159 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11160 }
11161
11162 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11163 void *data)
11164 {
11165 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11166 struct hclge_vport *vport = hclge_get_vport(handle);
11167 struct hclge_dev *hdev = vport->back;
11168 u32 regs_num_32_bit, regs_num_64_bit;
11169 int i, reg_num, separator_num, ret;
11170 u32 *reg = data;
11171
11172 *version = hdev->fw_version;
11173
11174 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11175 if (ret) {
11176 dev_err(&hdev->pdev->dev,
11177 "Get register number failed, ret = %d.\n", ret);
11178 return;
11179 }
11180
11181 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11182
11183 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11184 if (ret) {
11185 dev_err(&hdev->pdev->dev,
11186 "Get 32 bit register failed, ret = %d.\n", ret);
11187 return;
11188 }
11189 reg_num = regs_num_32_bit;
11190 reg += reg_num;
11191 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11192 for (i = 0; i < separator_num; i++)
11193 *reg++ = SEPARATOR_VALUE;
11194
11195 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11196 if (ret) {
11197 dev_err(&hdev->pdev->dev,
11198 "Get 64 bit register failed, ret = %d.\n", ret);
11199 return;
11200 }
11201 reg_num = regs_num_64_bit * 2;
11202 reg += reg_num;
11203 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11204 for (i = 0; i < separator_num; i++)
11205 *reg++ = SEPARATOR_VALUE;
11206
11207 ret = hclge_get_dfx_reg(hdev, reg);
11208 if (ret)
11209 dev_err(&hdev->pdev->dev,
11210 "Get dfx register failed, ret = %d.\n", ret);
11211 }
11212
11213 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11214 {
11215 struct hclge_set_led_state_cmd *req;
11216 struct hclge_desc desc;
11217 int ret;
11218
11219 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11220
11221 req = (struct hclge_set_led_state_cmd *)desc.data;
11222 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11223 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11224
11225 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11226 if (ret)
11227 dev_err(&hdev->pdev->dev,
11228 "Send set led state cmd error, ret =%d\n", ret);
11229
11230 return ret;
11231 }
11232
11233 enum hclge_led_status {
11234 HCLGE_LED_OFF,
11235 HCLGE_LED_ON,
11236 HCLGE_LED_NO_CHANGE = 0xFF,
11237 };
11238
11239 static int hclge_set_led_id(struct hnae3_handle *handle,
11240 enum ethtool_phys_id_state status)
11241 {
11242 struct hclge_vport *vport = hclge_get_vport(handle);
11243 struct hclge_dev *hdev = vport->back;
11244
11245 switch (status) {
11246 case ETHTOOL_ID_ACTIVE:
11247 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11248 case ETHTOOL_ID_INACTIVE:
11249 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11250 default:
11251 return -EINVAL;
11252 }
11253 }
11254
11255 static void hclge_get_link_mode(struct hnae3_handle *handle,
11256 unsigned long *supported,
11257 unsigned long *advertising)
11258 {
11259 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11260 struct hclge_vport *vport = hclge_get_vport(handle);
11261 struct hclge_dev *hdev = vport->back;
11262 unsigned int idx = 0;
11263
11264 for (; idx < size; idx++) {
11265 supported[idx] = hdev->hw.mac.supported[idx];
11266 advertising[idx] = hdev->hw.mac.advertising[idx];
11267 }
11268 }
11269
11270 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11271 {
11272 struct hclge_vport *vport = hclge_get_vport(handle);
11273 struct hclge_dev *hdev = vport->back;
11274
11275 return hclge_config_gro(hdev, enable);
11276 }
11277
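/* Re-apply the promiscuous mode and VLAN filter state for the PF when the
 * overflow promiscuous flags have changed or a previous update is still
 * pending (HCLGE_STATE_PROMISC_CHANGED).
 */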
11278 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11279 {
11280 struct hclge_vport *vport = &hdev->vport[0];
11281 struct hnae3_handle *handle = &vport->nic;
11282 u8 tmp_flags;
11283 int ret;
11284
11285 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11286 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11287 vport->last_promisc_flags = vport->overflow_promisc_flags;
11288 }
11289
11290 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11291 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11292 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11293 tmp_flags & HNAE3_MPE);
11294 if (!ret) {
11295 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11296 hclge_enable_vlan_filter(handle,
11297 tmp_flags & HNAE3_VLAN_FLTR);
11298 }
11299 }
11300 }
11301
11302 static bool hclge_module_existed(struct hclge_dev *hdev)
11303 {
11304 struct hclge_desc desc;
11305 u32 existed;
11306 int ret;
11307
11308 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11309 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11310 if (ret) {
11311 dev_err(&hdev->pdev->dev,
11312 "failed to get SFP exist state, ret = %d\n", ret);
11313 return false;
11314 }
11315
11316 existed = le32_to_cpu(desc.data[0]);
11317
11318 return existed != 0;
11319 }
11320
11321 /* One read needs 6 BDs (140 bytes in total).
11322  * Returns the number of bytes actually read; 0 means the read failed.
11323  */
11324 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11325 u32 len, u8 *data)
11326 {
11327 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11328 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11329 u16 read_len;
11330 u16 copy_len;
11331 int ret;
11332 int i;
11333
11334 /* setup all 6 bds to read module eeprom info. */
11335 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11336 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11337 true);
11338
11339 /* bd0~bd4 need next flag */
11340 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11341 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11342 }
11343
11344 /* set up bd0; this bd contains the offset and read length. */
11345 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11346 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11347 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11348 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11349
11350 ret = hclge_cmd_send(&hdev->hw, desc, i);
11351 if (ret) {
11352 dev_err(&hdev->pdev->dev,
11353 "failed to get SFP eeprom info, ret = %d\n", ret);
11354 return 0;
11355 }
11356
11357 /* copy sfp info from bd0 to out buffer. */
11358 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11359 memcpy(data, sfp_info_bd0->data, copy_len);
11360 read_len = copy_len;
11361
11362 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11363 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11364 if (read_len >= len)
11365 return read_len;
11366
11367 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11368 memcpy(data + read_len, desc[i].data, copy_len);
11369 read_len += copy_len;
11370 }
11371
11372 return read_len;
11373 }
11374
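/* Read the optical module EEPROM for ethtool -m: only supported on fiber
 * ports with a module present; the EEPROM is read in chunks of at most
 * HCLGE_SFP_INFO_MAX_LEN bytes per firmware command.
 */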
11375 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11376 u32 len, u8 *data)
11377 {
11378 struct hclge_vport *vport = hclge_get_vport(handle);
11379 struct hclge_dev *hdev = vport->back;
11380 u32 read_len = 0;
11381 u16 data_len;
11382
11383 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11384 return -EOPNOTSUPP;
11385
11386 if (!hclge_module_existed(hdev))
11387 return -ENXIO;
11388
11389 while (read_len < len) {
11390 data_len = hclge_get_sfp_eeprom_info(hdev,
11391 offset + read_len,
11392 len - read_len,
11393 data + read_len);
11394 if (!data_len)
11395 return -EIO;
11396
11397 read_len += data_len;
11398 }
11399
11400 return 0;
11401 }
11402
11403 static const struct hnae3_ae_ops hclge_ops = {
11404 .init_ae_dev = hclge_init_ae_dev,
11405 .uninit_ae_dev = hclge_uninit_ae_dev,
11406 .flr_prepare = hclge_flr_prepare,
11407 .flr_done = hclge_flr_done,
11408 .init_client_instance = hclge_init_client_instance,
11409 .uninit_client_instance = hclge_uninit_client_instance,
11410 .map_ring_to_vector = hclge_map_ring_to_vector,
11411 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11412 .get_vector = hclge_get_vector,
11413 .put_vector = hclge_put_vector,
11414 .set_promisc_mode = hclge_set_promisc_mode,
11415 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11416 .set_loopback = hclge_set_loopback,
11417 .start = hclge_ae_start,
11418 .stop = hclge_ae_stop,
11419 .client_start = hclge_client_start,
11420 .client_stop = hclge_client_stop,
11421 .get_status = hclge_get_status,
11422 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11423 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11424 .get_media_type = hclge_get_media_type,
11425 .check_port_speed = hclge_check_port_speed,
11426 .get_fec = hclge_get_fec,
11427 .set_fec = hclge_set_fec,
11428 .get_rss_key_size = hclge_get_rss_key_size,
11429 .get_rss_indir_size = hclge_get_rss_indir_size,
11430 .get_rss = hclge_get_rss,
11431 .set_rss = hclge_set_rss,
11432 .set_rss_tuple = hclge_set_rss_tuple,
11433 .get_rss_tuple = hclge_get_rss_tuple,
11434 .get_tc_size = hclge_get_tc_size,
11435 .get_mac_addr = hclge_get_mac_addr,
11436 .set_mac_addr = hclge_set_mac_addr,
11437 .do_ioctl = hclge_do_ioctl,
11438 .add_uc_addr = hclge_add_uc_addr,
11439 .rm_uc_addr = hclge_rm_uc_addr,
11440 .add_mc_addr = hclge_add_mc_addr,
11441 .rm_mc_addr = hclge_rm_mc_addr,
11442 .set_autoneg = hclge_set_autoneg,
11443 .get_autoneg = hclge_get_autoneg,
11444 .restart_autoneg = hclge_restart_autoneg,
11445 .halt_autoneg = hclge_halt_autoneg,
11446 .get_pauseparam = hclge_get_pauseparam,
11447 .set_pauseparam = hclge_set_pauseparam,
11448 .set_mtu = hclge_set_mtu,
11449 .reset_queue = hclge_reset_tqp,
11450 .get_stats = hclge_get_stats,
11451 .get_mac_stats = hclge_get_mac_stat,
11452 .update_stats = hclge_update_stats,
11453 .get_strings = hclge_get_strings,
11454 .get_sset_count = hclge_get_sset_count,
11455 .get_fw_version = hclge_get_fw_version,
11456 .get_mdix_mode = hclge_get_mdix_mode,
11457 .enable_vlan_filter = hclge_enable_vlan_filter,
11458 .set_vlan_filter = hclge_set_vlan_filter,
11459 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11460 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11461 .reset_event = hclge_reset_event,
11462 .get_reset_level = hclge_get_reset_level,
11463 .set_default_reset_request = hclge_set_def_reset_request,
11464 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11465 .set_channels = hclge_set_channels,
11466 .get_channels = hclge_get_channels,
11467 .get_regs_len = hclge_get_regs_len,
11468 .get_regs = hclge_get_regs,
11469 .set_led_id = hclge_set_led_id,
11470 .get_link_mode = hclge_get_link_mode,
11471 .add_fd_entry = hclge_add_fd_entry,
11472 .del_fd_entry = hclge_del_fd_entry,
11473 .del_all_fd_entries = hclge_del_all_fd_entries,
11474 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11475 .get_fd_rule_info = hclge_get_fd_rule_info,
11476 .get_fd_all_rules = hclge_get_all_rules,
11477 .enable_fd = hclge_enable_fd,
11478 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11479 .dbg_run_cmd = hclge_dbg_run_cmd,
11480 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11481 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11482 .ae_dev_resetting = hclge_ae_dev_resetting,
11483 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11484 .set_gro_en = hclge_gro_en,
11485 .get_global_queue_id = hclge_covert_handle_qid_global,
11486 .set_timer_task = hclge_set_timer_task,
11487 .mac_connect_phy = hclge_mac_connect_phy,
11488 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11489 .get_vf_config = hclge_get_vf_config,
11490 .set_vf_link_state = hclge_set_vf_link_state,
11491 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11492 .set_vf_trust = hclge_set_vf_trust,
11493 .set_vf_rate = hclge_set_vf_rate,
11494 .set_vf_mac = hclge_set_vf_mac,
11495 .get_module_eeprom = hclge_get_module_eeprom,
11496 .get_cmdq_stat = hclge_get_cmdq_stat,
11497 };
11498
11499 static struct hnae3_ae_algo ae_algo = {
11500 .ops = &hclge_ops,
11501 .pdev_id_table = ae_algo_pci_tbl,
11502 };
11503
11504 static int hclge_init(void)
11505 {
11506 pr_info("%s is initializing\n", HCLGE_NAME);
11507
11508 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11509 if (!hclge_wq) {
11510 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11511 return -ENOMEM;
11512 }
11513
11514 hnae3_register_ae_algo(&ae_algo);
11515
11516 return 0;
11517 }
11518
11519 static void hclge_exit(void)
11520 {
11521 hnae3_unregister_ae_algo_prepare(&ae_algo);
11522 hnae3_unregister_ae_algo(&ae_algo);
11523 destroy_workqueue(hclge_wq);
11524 }
11525 module_init(hclge_init);
11526 module_exit(hclge_exit);
11527
11528 MODULE_LICENSE("GPL");
11529 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11530 MODULE_DESCRIPTION("HCLGE Driver");
11531 MODULE_VERSION(HCLGE_MOD_VERSION);
11532