1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
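/* Illustrative sketch (not part of the original source): the stats tables
 * below pair an ethtool string with a byte offset produced by
 * HCLGE_MAC_STATS_FIELD_OFF(), and hclge_comm_get_stats() reads each counter
 * back with HCLGE_STATS_READ().  For example, assuming a valid hdev:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */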
29
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56 #define HCLGE_LINK_STATUS_MS 10
57
58 #define HCLGE_VF_VPORT_START_NUM 1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
89 {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
114 HCLGE_FUN_RST_ING,
115 HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
142 HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 "App Loopback test",
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
154 "Phy Loopback test"
155 };
156
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
335 },
336 };
337
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
374 };
375
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
378 { IP_FRAGEMENT, 1},
379 { ROCE_TYPE, 1},
380 { NEXT_KEY, 5},
381 { VLAN_NUMBER, 2},
382 { SRC_VPORT, 12},
383 { DST_VPORT, 12},
384 { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48},
389 { OUTER_SRC_MAC, 48},
390 { OUTER_VLAN_TAG_FST, 16},
391 { OUTER_VLAN_TAG_SEC, 16},
392 { OUTER_ETH_TYPE, 16},
393 { OUTER_L2_RSV, 16},
394 { OUTER_IP_TOS, 8},
395 { OUTER_IP_PROTO, 8},
396 { OUTER_SRC_IP, 32},
397 { OUTER_DST_IP, 32},
398 { OUTER_L3_RSV, 16},
399 { OUTER_SRC_PORT, 16},
400 { OUTER_DST_PORT, 16},
401 { OUTER_L4_RSV, 32},
402 { OUTER_TUN_VNI, 24},
403 { OUTER_TUN_FLOW_ID, 8},
404 { INNER_DST_MAC, 48},
405 { INNER_SRC_MAC, 48},
406 { INNER_VLAN_TAG_FST, 16},
407 { INNER_VLAN_TAG_SEC, 16},
408 { INNER_ETH_TYPE, 16},
409 { INNER_L2_RSV, 16},
410 { INNER_IP_TOS, 8},
411 { INNER_IP_PROTO, 8},
412 { INNER_SRC_IP, 32},
413 { INNER_DST_IP, 32},
414 { INNER_L3_RSV, 16},
415 { INNER_SRC_PORT, 16},
416 { INNER_DST_PORT, 16},
417 { INNER_L4_RSV, 32},
418 };
419
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423
424 u64 *data = (u64 *)(&hdev->mac_stats);
425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 __le64 *desc_data;
427 int i, k, n;
428 int ret;
429
430 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 if (ret) {
433 dev_err(&hdev->pdev->dev,
434 "Get MAC pkt stats fail, status = %d.\n", ret);
435
436 return ret;
437 }
438
439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 /* for special opcode 0032, only the first desc has the head */
441 if (unlikely(i == 0)) {
442 desc_data = (__le64 *)(&desc[i].data[0]);
443 n = HCLGE_RD_FIRST_STATS_NUM;
444 } else {
445 desc_data = (__le64 *)(&desc[i]);
446 n = HCLGE_RD_OTHER_STATS_NUM;
447 }
448
449 for (k = 0; k < n; k++) {
450 *data += le64_to_cpu(*desc_data);
451 data++;
452 desc_data++;
453 }
454 }
455
456 return 0;
457 }
458
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 u64 *data = (u64 *)(&hdev->mac_stats);
462 struct hclge_desc *desc;
463 __le64 *desc_data;
464 u16 i, k, n;
465 int ret;
466
467 /* This may be called inside atomic sections,
468 * so GFP_ATOMIC is more suitable here
469 */
470 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 if (!desc)
472 return -ENOMEM;
473
474 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 if (ret) {
477 kfree(desc);
478 return ret;
479 }
480
481 for (i = 0; i < desc_num; i++) {
482 /* for special opcode 0034, only the first desc has the head */
483 if (i == 0) {
484 desc_data = (__le64 *)(&desc[i].data[0]);
485 n = HCLGE_RD_FIRST_STATS_NUM;
486 } else {
487 desc_data = (__le64 *)(&desc[i]);
488 n = HCLGE_RD_OTHER_STATS_NUM;
489 }
490
491 for (k = 0; k < n; k++) {
492 *data += le64_to_cpu(*desc_data);
493 data++;
494 desc_data++;
495 }
496 }
497
498 kfree(desc);
499
500 return 0;
501 }
502
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 struct hclge_desc desc;
506 __le32 *desc_data;
507 u32 reg_num;
508 int ret;
509
510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 if (ret)
513 return ret;
514
515 desc_data = (__le32 *)(&desc.data[0]);
516 reg_num = le32_to_cpu(*desc_data);
517
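/* Convert the register count into a descriptor count: the arithmetic below
 * reserves one descriptor for the first 3 registers and one more for every
 * group of up to 4 remaining registers (rounded up).
 */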
518 *desc_num = 1 + ((reg_num - 3) >> 2) +
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521 return 0;
522 }
523
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 u32 desc_num;
527 int ret;
528
529 ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
531 /* The firmware supports the new statistics acquisition method */
532 if (!ret)
533 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 else if (ret == -EOPNOTSUPP)
535 ret = hclge_mac_update_stats_defective(hdev);
536 else
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539 return ret;
540 }
541
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 struct hclge_vport *vport = hclge_get_vport(handle);
546 struct hclge_dev *hdev = vport->back;
547 struct hnae3_queue *queue;
548 struct hclge_desc desc[1];
549 struct hclge_tqp *tqp;
550 int ret, i;
551
552 for (i = 0; i < kinfo->num_tqps; i++) {
553 queue = handle->kinfo.tqp[i];
554 tqp = container_of(queue, struct hclge_tqp, q);
555 /* command : HCLGE_OPC_QUERY_RX_STATS */
556 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 true);
558
559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 if (ret) {
562 dev_err(&hdev->pdev->dev,
563 "Query tqp stat fail, status = %d,queue = %d\n",
564 ret, i);
565 return ret;
566 }
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 le32_to_cpu(desc[0].data[1]);
569 }
570
571 for (i = 0; i < kinfo->num_tqps; i++) {
572 queue = handle->kinfo.tqp[i];
573 tqp = container_of(queue, struct hclge_tqp, q);
574 /* command : HCLGE_OPC_QUERY_TX_STATS */
575 hclge_cmd_setup_basic_desc(&desc[0],
576 HCLGE_OPC_QUERY_TX_STATS,
577 true);
578
579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 if (ret) {
582 dev_err(&hdev->pdev->dev,
583 "Query tqp stat fail, status = %d,queue = %d\n",
584 ret, i);
585 return ret;
586 }
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 le32_to_cpu(desc[0].data[1]);
589 }
590
591 return 0;
592 }
593
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 struct hclge_tqp *tqp;
598 u64 *buff = data;
599 int i;
600
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 }
605
606 for (i = 0; i < kinfo->num_tqps; i++) {
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 }
610
611 return buff;
612 }
613
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
618 /* each TQP provides one TX queue and one RX queue */
619 return kinfo->num_tqps * (2);
620 }
621
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 u8 *buff = data;
626 int i;
627
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 struct hclge_tqp, q);
631 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 tqp->index);
633 buff = buff + ETH_GSTRING_LEN;
634 }
635
636 for (i = 0; i < kinfo->num_tqps; i++) {
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 struct hclge_tqp, q);
639 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 tqp->index);
641 buff = buff + ETH_GSTRING_LEN;
642 }
643
644 return buff;
645 }
646
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 const struct hclge_comm_stats_str strs[],
649 int size, u64 *data)
650 {
651 u64 *buf = data;
652 u32 i;
653
654 for (i = 0; i < size; i++)
655 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657 return buf + size;
658 }
659
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 const struct hclge_comm_stats_str strs[],
662 int size, u8 *data)
663 {
664 char *buff = (char *)data;
665 u32 i;
666
667 if (stringset != ETH_SS_STATS)
668 return buff;
669
670 for (i = 0; i < size; i++) {
671 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 buff = buff + ETH_GSTRING_LEN;
673 }
674
675 return (u8 *)buff;
676 }
677
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 struct hnae3_handle *handle;
681 int status;
682
683 handle = &hdev->vport[0].nic;
684 if (handle->client) {
685 status = hclge_tqps_update_stats(handle);
686 if (status) {
687 dev_err(&hdev->pdev->dev,
688 "Update TQPS stats fail, status = %d.\n",
689 status);
690 }
691 }
692
693 status = hclge_mac_update_stats(hdev);
694 if (status)
695 dev_err(&hdev->pdev->dev,
696 "Update MAC stats fail, status = %d.\n", status);
697 }
698
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 struct net_device_stats *net_stats)
701 {
702 struct hclge_vport *vport = hclge_get_vport(handle);
703 struct hclge_dev *hdev = vport->back;
704 int status;
705
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 return;
708
709 status = hclge_mac_update_stats(hdev);
710 if (status)
711 dev_err(&hdev->pdev->dev,
712 "Update MAC stats fail, status = %d.\n",
713 status);
714
715 status = hclge_tqps_update_stats(handle);
716 if (status)
717 dev_err(&hdev->pdev->dev,
718 "Update TQPS stats fail, status = %d.\n",
719 status);
720
721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 HNAE3_SUPPORT_PHY_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730
731 struct hclge_vport *vport = hclge_get_vport(handle);
732 struct hclge_dev *hdev = vport->back;
733 int count = 0;
734
735 /* Loopback test support rules:
736 * mac: only supported in GE mode
737 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
738 * phy: only supported when a PHY device exists on the board
739 */
740 if (stringset == ETH_SS_TEST) {
741 /* clear loopback bit flags at first */
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 count += 1;
748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 }
750
751 count += 2;
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
755 if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
756 hdev->hw.mac.phydev->drv->set_loopback) {
757 count += 1;
758 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759 }
760
761 } else if (stringset == ETH_SS_STATS) {
762 count = ARRAY_SIZE(g_mac_stats_string) +
763 hclge_tqps_get_sset_count(handle, stringset);
764 }
765
766 return count;
767 }
768
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770 u8 *data)
771 {
772 u8 *p = data;
773 int size;
774
775 if (stringset == ETH_SS_STATS) {
776 size = ARRAY_SIZE(g_mac_stats_string);
777 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778 size, p);
779 p = hclge_tqps_get_strings(handle, p);
780 } else if (stringset == ETH_SS_TEST) {
781 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783 ETH_GSTRING_LEN);
784 p += ETH_GSTRING_LEN;
785 }
786 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788 ETH_GSTRING_LEN);
789 p += ETH_GSTRING_LEN;
790 }
791 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792 memcpy(p,
793 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794 ETH_GSTRING_LEN);
795 p += ETH_GSTRING_LEN;
796 }
797 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799 ETH_GSTRING_LEN);
800 p += ETH_GSTRING_LEN;
801 }
802 }
803 }
804
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807 struct hclge_vport *vport = hclge_get_vport(handle);
808 struct hclge_dev *hdev = vport->back;
809 u64 *p;
810
811 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812 ARRAY_SIZE(g_mac_stats_string), data);
813 p = hclge_tqps_get_stats(handle, p);
814 }
815
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817 struct hns3_mac_stats *mac_stats)
818 {
819 struct hclge_vport *vport = hclge_get_vport(handle);
820 struct hclge_dev *hdev = vport->back;
821
822 hclge_update_stats(handle, NULL);
823
824 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829 struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK 0xF
832
833 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834 return -EINVAL;
835
836 /* Record whether this PF is the main PF */
837 if (status->pf_state & HCLGE_PF_STATE_MAIN)
838 hdev->flag |= HCLGE_FLAG_MAIN;
839 else
840 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843 return 0;
844 }
845
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT 5
849
850 struct hclge_func_status_cmd *req;
851 struct hclge_desc desc;
852 int timeout = 0;
853 int ret;
854
855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856 req = (struct hclge_func_status_cmd *)desc.data;
857
858 do {
859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 if (ret) {
861 dev_err(&hdev->pdev->dev,
862 "query function status failed %d.\n", ret);
863 return ret;
864 }
865
866 /* Check whether the PF reset is done */
867 if (req->pf_state)
868 break;
869 usleep_range(1000, 2000);
870 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871
872 return hclge_parse_func_status(hdev, req);
873 }
874
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877 struct hclge_pf_res_cmd *req;
878 struct hclge_desc desc;
879 int ret;
880
881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883 if (ret) {
884 dev_err(&hdev->pdev->dev,
885 "query pf resource failed %d.\n", ret);
886 return ret;
887 }
888
889 req = (struct hclge_pf_res_cmd *)desc.data;
890 hdev->num_tqps = le16_to_cpu(req->tqp_num);
891 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892
893 if (req->tx_buf_size)
894 hdev->tx_buf_size =
895 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 else
897 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
899 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
901 if (req->dv_buf_size)
902 hdev->dv_buf_size =
903 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 else
905 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
907 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908
909 if (hnae3_dev_roce_supported(hdev)) {
910 hdev->roce_base_msix_offset =
911 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
912 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
913 hdev->num_roce_msi =
914 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
915 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
916
917 /* the number of NIC MSI-X vectors always equals the RoCE's. */
918 hdev->num_nic_msi = hdev->num_roce_msi;
919
920 /* The PF should have both NIC and RoCE vectors;
921 * the NIC vectors are laid out before the RoCE vectors.
922 */
923 hdev->num_msi = hdev->num_roce_msi +
924 hdev->roce_base_msix_offset;
925 } else {
926 hdev->num_msi =
927 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
928 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
929
930 hdev->num_nic_msi = hdev->num_msi;
931 }
932
933 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
934 dev_err(&hdev->pdev->dev,
935 "Just %u msi resources, not enough for pf(min:2).\n",
936 hdev->num_nic_msi);
937 return -EINVAL;
938 }
939
940 return 0;
941 }
942
943 static int hclge_parse_speed(int speed_cmd, int *speed)
944 {
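/* Note: the firmware speed codes handled below are not ordered by speed,
 * e.g. code 0 means 1G while codes 6 and 7 mean 10M and 100M.
 */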
945 switch (speed_cmd) {
946 case 6:
947 *speed = HCLGE_MAC_SPEED_10M;
948 break;
949 case 7:
950 *speed = HCLGE_MAC_SPEED_100M;
951 break;
952 case 0:
953 *speed = HCLGE_MAC_SPEED_1G;
954 break;
955 case 1:
956 *speed = HCLGE_MAC_SPEED_10G;
957 break;
958 case 2:
959 *speed = HCLGE_MAC_SPEED_25G;
960 break;
961 case 3:
962 *speed = HCLGE_MAC_SPEED_40G;
963 break;
964 case 4:
965 *speed = HCLGE_MAC_SPEED_50G;
966 break;
967 case 5:
968 *speed = HCLGE_MAC_SPEED_100G;
969 break;
970 case 8:
971 *speed = HCLGE_MAC_SPEED_200G;
972 break;
973 default:
974 return -EINVAL;
975 }
976
977 return 0;
978 }
979
980 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981 {
982 struct hclge_vport *vport = hclge_get_vport(handle);
983 struct hclge_dev *hdev = vport->back;
984 u32 speed_ability = hdev->hw.mac.speed_ability;
985 u32 speed_bit = 0;
986
987 switch (speed) {
988 case HCLGE_MAC_SPEED_10M:
989 speed_bit = HCLGE_SUPPORT_10M_BIT;
990 break;
991 case HCLGE_MAC_SPEED_100M:
992 speed_bit = HCLGE_SUPPORT_100M_BIT;
993 break;
994 case HCLGE_MAC_SPEED_1G:
995 speed_bit = HCLGE_SUPPORT_1G_BIT;
996 break;
997 case HCLGE_MAC_SPEED_10G:
998 speed_bit = HCLGE_SUPPORT_10G_BIT;
999 break;
1000 case HCLGE_MAC_SPEED_25G:
1001 speed_bit = HCLGE_SUPPORT_25G_BIT;
1002 break;
1003 case HCLGE_MAC_SPEED_40G:
1004 speed_bit = HCLGE_SUPPORT_40G_BIT;
1005 break;
1006 case HCLGE_MAC_SPEED_50G:
1007 speed_bit = HCLGE_SUPPORT_50G_BIT;
1008 break;
1009 case HCLGE_MAC_SPEED_100G:
1010 speed_bit = HCLGE_SUPPORT_100G_BIT;
1011 break;
1012 case HCLGE_MAC_SPEED_200G:
1013 speed_bit = HCLGE_SUPPORT_200G_BIT;
1014 break;
1015 default:
1016 return -EINVAL;
1017 }
1018
1019 if (speed_bit & speed_ability)
1020 return 0;
1021
1022 return -EINVAL;
1023 }
1024
1025 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1026 {
1027 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1028 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1029 mac->supported);
1030 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1031 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032 mac->supported);
1033 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1034 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035 mac->supported);
1036 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1037 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038 mac->supported);
1039 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1040 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041 mac->supported);
1042 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1044 mac->supported);
1045 }
1046
1047 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1048 {
1049 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051 mac->supported);
1052 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1053 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1054 mac->supported);
1055 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1056 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057 mac->supported);
1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060 mac->supported);
1061 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063 mac->supported);
1064 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065 linkmode_set_bit(
1066 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1067 mac->supported);
1068 }
1069
1070 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1071 {
1072 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1073 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074 mac->supported);
1075 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1076 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077 mac->supported);
1078 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1079 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080 mac->supported);
1081 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1082 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083 mac->supported);
1084 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1085 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086 mac->supported);
1087 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1089 mac->supported);
1090 }
1091
1092 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1093 {
1094 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1095 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096 mac->supported);
1097 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1098 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099 mac->supported);
1100 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1101 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102 mac->supported);
1103 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1104 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105 mac->supported);
1106 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1107 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108 mac->supported);
1109 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111 mac->supported);
1112 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1114 mac->supported);
1115 }
1116
1117 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118 {
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1120 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121
1122 switch (mac->speed) {
1123 case HCLGE_MAC_SPEED_10G:
1124 case HCLGE_MAC_SPEED_40G:
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1126 mac->supported);
1127 mac->fec_ability =
1128 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129 break;
1130 case HCLGE_MAC_SPEED_25G:
1131 case HCLGE_MAC_SPEED_50G:
1132 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1133 mac->supported);
1134 mac->fec_ability =
1135 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1136 BIT(HNAE3_FEC_AUTO);
1137 break;
1138 case HCLGE_MAC_SPEED_100G:
1139 case HCLGE_MAC_SPEED_200G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1141 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1142 break;
1143 default:
1144 mac->fec_ability = 0;
1145 break;
1146 }
1147 }
1148
1149 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1150 u16 speed_ability)
1151 {
1152 struct hclge_mac *mac = &hdev->hw.mac;
1153
1154 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1156 mac->supported);
1157
1158 hclge_convert_setting_sr(mac, speed_ability);
1159 hclge_convert_setting_lr(mac, speed_ability);
1160 hclge_convert_setting_cr(mac, speed_ability);
1161 if (hnae3_dev_fec_supported(hdev))
1162 hclge_convert_setting_fec(mac);
1163
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1166 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1167 }
1168
1169 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1170 u16 speed_ability)
1171 {
1172 struct hclge_mac *mac = &hdev->hw.mac;
1173
1174 hclge_convert_setting_kr(mac, speed_ability);
1175 if (hnae3_dev_fec_supported(hdev))
1176 hclge_convert_setting_fec(mac);
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1179 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1180 }
1181
1182 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1183 u16 speed_ability)
1184 {
1185 unsigned long *supported = hdev->hw.mac.supported;
1186
1187 /* default to supporting all speeds for a GE port */
1188 if (!speed_ability)
1189 speed_ability = HCLGE_SUPPORT_GE;
1190
1191 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1192 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1193 supported);
1194
1195 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197 supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1199 supported);
1200 }
1201
1202 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1204 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1205 }
1206
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1210 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1211 }
1212
1213 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1214 {
1215 u8 media_type = hdev->hw.mac.media_type;
1216
1217 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1218 hclge_parse_fiber_link_mode(hdev, speed_ability);
1219 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1220 hclge_parse_copper_link_mode(hdev, speed_ability);
1221 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1222 hclge_parse_backplane_link_mode(hdev, speed_ability);
1223 }
1224
1225 static u32 hclge_get_max_speed(u16 speed_ability)
1226 {
1227 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1228 return HCLGE_MAC_SPEED_200G;
1229
1230 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1231 return HCLGE_MAC_SPEED_100G;
1232
1233 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1234 return HCLGE_MAC_SPEED_50G;
1235
1236 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1237 return HCLGE_MAC_SPEED_40G;
1238
1239 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1240 return HCLGE_MAC_SPEED_25G;
1241
1242 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1243 return HCLGE_MAC_SPEED_10G;
1244
1245 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1246 return HCLGE_MAC_SPEED_1G;
1247
1248 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1249 return HCLGE_MAC_SPEED_100M;
1250
1251 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1252 return HCLGE_MAC_SPEED_10M;
1253
1254 return HCLGE_MAC_SPEED_1G;
1255 }
1256
1257 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1258 {
1259 #define SPEED_ABILITY_EXT_SHIFT 8
1260
1261 struct hclge_cfg_param_cmd *req;
1262 u64 mac_addr_tmp_high;
1263 u16 speed_ability_ext;
1264 u64 mac_addr_tmp;
1265 unsigned int i;
1266
1267 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1268
1269 /* get the configuration */
1270 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1271 HCLGE_CFG_VMDQ_M,
1272 HCLGE_CFG_VMDQ_S);
1273 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1275 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1276 HCLGE_CFG_TQP_DESC_N_M,
1277 HCLGE_CFG_TQP_DESC_N_S);
1278
1279 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 HCLGE_CFG_PHY_ADDR_M,
1281 HCLGE_CFG_PHY_ADDR_S);
1282 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 HCLGE_CFG_MEDIA_TP_M,
1284 HCLGE_CFG_MEDIA_TP_S);
1285 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1286 HCLGE_CFG_RX_BUF_LEN_M,
1287 HCLGE_CFG_RX_BUF_LEN_S);
1288 /* get mac_address */
1289 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1290 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1291 HCLGE_CFG_MAC_ADDR_H_M,
1292 HCLGE_CFG_MAC_ADDR_H_S);
1293
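/* param[2] carries the low 32 bits of the MAC address and the field in
 * param[3] carries the remaining high bits; the two shifts below move the
 * high part up by 32 bits in total before merging.
 */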
1294 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1295
1296 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1297 HCLGE_CFG_DEFAULT_SPEED_M,
1298 HCLGE_CFG_DEFAULT_SPEED_S);
1299 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1300 HCLGE_CFG_RSS_SIZE_M,
1301 HCLGE_CFG_RSS_SIZE_S);
1302
1303 for (i = 0; i < ETH_ALEN; i++)
1304 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1305
1306 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1307 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1308
1309 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1310 HCLGE_CFG_SPEED_ABILITY_M,
1311 HCLGE_CFG_SPEED_ABILITY_S);
1312 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1314 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1315 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316
1317 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1318 HCLGE_CFG_UMV_TBL_SPACE_M,
1319 HCLGE_CFG_UMV_TBL_SPACE_S);
1320 if (!cfg->umv_space)
1321 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1322 }
1323
1324 /* hclge_get_cfg: query the static parameters from flash
1325 * @hdev: pointer to struct hclge_dev
1326 * @hcfg: the config structure to be filled
1327 */
1328 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1329 {
1330 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1331 struct hclge_cfg_param_cmd *req;
1332 unsigned int i;
1333 int ret;
1334
1335 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1336 u32 offset = 0;
1337
1338 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1339 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1340 true);
1341 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1342 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1343 /* The length must be expressed in units of 4 bytes when sent to hardware */
1344 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1345 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1346 req->offset = cpu_to_le32(offset);
1347 }
1348
1349 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1350 if (ret) {
1351 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1352 return ret;
1353 }
1354
1355 hclge_parse_cfg(hcfg, desc);
1356
1357 return 0;
1358 }
1359
1360 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361 {
1362 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1363
1364 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365
1366 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1367 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1368 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1369 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1370 }
1371
1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373 struct hclge_desc *desc)
1374 {
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376 struct hclge_dev_specs_0_cmd *req0;
1377
1378 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1379
1380 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1381 ae_dev->dev_specs.rss_ind_tbl_size =
1382 le16_to_cpu(req0->rss_ind_tbl_size);
1383 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1384 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1385 }
1386
1387 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1388 {
1389 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1390
1391 if (!dev_specs->max_non_tso_bd_num)
1392 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1393 if (!dev_specs->rss_ind_tbl_size)
1394 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1395 if (!dev_specs->rss_key_size)
1396 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1397 if (!dev_specs->max_tm_rate)
1398 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1399 }
1400
1401 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1402 {
1403 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1404 int ret;
1405 int i;
1406
1407 /* set default specifications as devices lower than version V3 do not
1408 * support querying specifications from firmware.
1409 */
1410 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1411 hclge_set_default_dev_specs(hdev);
1412 return 0;
1413 }
1414
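/* every descriptor except the last carries the NEXT flag so that the
 * firmware treats the whole array as one multi-BD query.
 */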
1415 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1416 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1417 true);
1418 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1419 }
1420 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1421
1422 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1423 if (ret)
1424 return ret;
1425
1426 hclge_parse_dev_specs(hdev, desc);
1427 hclge_check_dev_specs(hdev);
1428
1429 return 0;
1430 }
1431
1432 static int hclge_get_cap(struct hclge_dev *hdev)
1433 {
1434 int ret;
1435
1436 ret = hclge_query_function_status(hdev);
1437 if (ret) {
1438 dev_err(&hdev->pdev->dev,
1439 "query function status error %d.\n", ret);
1440 return ret;
1441 }
1442
1443 /* get pf resource */
1444 return hclge_query_pf_resource(hdev);
1445 }
1446
1447 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1448 {
1449 #define HCLGE_MIN_TX_DESC 64
1450 #define HCLGE_MIN_RX_DESC 64
1451
1452 if (!is_kdump_kernel())
1453 return;
1454
1455 dev_info(&hdev->pdev->dev,
1456 "Running kdump kernel. Using minimal resources\n");
1457
1458 /* the minimal number of queue pairs equals the number of vports */
1459 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1460 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1461 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1462 }
1463
1464 static int hclge_configure(struct hclge_dev *hdev)
1465 {
1466 const struct cpumask *cpumask = cpu_online_mask;
1467 struct hclge_cfg cfg;
1468 unsigned int i;
1469 int node, ret;
1470
1471 ret = hclge_get_cfg(hdev, &cfg);
1472 if (ret)
1473 return ret;
1474
1475 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1476 hdev->base_tqp_pid = 0;
1477 hdev->rss_size_max = cfg.rss_size_max;
1478 hdev->rx_buf_len = cfg.rx_buf_len;
1479 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1480 hdev->hw.mac.media_type = cfg.media_type;
1481 hdev->hw.mac.phy_addr = cfg.phy_addr;
1482 hdev->num_tx_desc = cfg.tqp_desc_num;
1483 hdev->num_rx_desc = cfg.tqp_desc_num;
1484 hdev->tm_info.num_pg = 1;
1485 hdev->tc_max = cfg.tc_num;
1486 hdev->tm_info.hw_pfc_map = 0;
1487 hdev->wanted_umv_size = cfg.umv_space;
1488
1489 if (hnae3_dev_fd_supported(hdev)) {
1490 hdev->fd_en = true;
1491 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1492 }
1493
1494 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1495 if (ret) {
1496 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1497 cfg.default_speed, ret);
1498 return ret;
1499 }
1500
1501 hclge_parse_link_mode(hdev, cfg.speed_ability);
1502
1503 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1504
1505 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1506 (hdev->tc_max < 1)) {
1507 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1508 hdev->tc_max);
1509 hdev->tc_max = 1;
1510 }
1511
1512 /* Dev does not support DCB */
1513 if (!hnae3_dev_dcb_supported(hdev)) {
1514 hdev->tc_max = 1;
1515 hdev->pfc_max = 0;
1516 } else {
1517 hdev->pfc_max = hdev->tc_max;
1518 }
1519
1520 hdev->tm_info.num_tc = 1;
1521
1522 /* Non-contiguous TCs are currently not supported */
1523 for (i = 0; i < hdev->tm_info.num_tc; i++)
1524 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1525
1526 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1527
1528 hclge_init_kdump_kernel_config(hdev);
1529
1530 /* Set the affinity based on numa node */
1531 node = dev_to_node(&hdev->pdev->dev);
1532 if (node != NUMA_NO_NODE)
1533 cpumask = cpumask_of_node(node);
1534
1535 cpumask_copy(&hdev->affinity_mask, cpumask);
1536
1537 return ret;
1538 }
1539
1540 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1541 u16 tso_mss_max)
1542 {
1543 struct hclge_cfg_tso_status_cmd *req;
1544 struct hclge_desc desc;
1545
1546 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1547
1548 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1549 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1550 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1551
1552 return hclge_cmd_send(&hdev->hw, &desc, 1);
1553 }
1554
1555 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1556 {
1557 struct hclge_cfg_gro_status_cmd *req;
1558 struct hclge_desc desc;
1559 int ret;
1560
1561 if (!hnae3_dev_gro_supported(hdev))
1562 return 0;
1563
1564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1565 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1566
1567 req->gro_en = en ? 1 : 0;
1568
1569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1570 if (ret)
1571 dev_err(&hdev->pdev->dev,
1572 "GRO hardware config cmd failed, ret = %d\n", ret);
1573
1574 return ret;
1575 }
1576
1577 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1578 {
1579 struct hclge_tqp *tqp;
1580 int i;
1581
1582 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1583 sizeof(struct hclge_tqp), GFP_KERNEL);
1584 if (!hdev->htqp)
1585 return -ENOMEM;
1586
1587 tqp = hdev->htqp;
1588
1589 for (i = 0; i < hdev->num_tqps; i++) {
1590 tqp->dev = &hdev->pdev->dev;
1591 tqp->index = i;
1592
1593 tqp->q.ae_algo = &ae_algo;
1594 tqp->q.buf_size = hdev->rx_buf_len;
1595 tqp->q.tx_desc_num = hdev->num_tx_desc;
1596 tqp->q.rx_desc_num = hdev->num_rx_desc;
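/* each TQP gets its own register window, located at a fixed stride
 * (HCLGE_TQP_REG_SIZE) from HCLGE_TQP_REG_OFFSET.
 */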
1597 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1598 i * HCLGE_TQP_REG_SIZE;
1599
1600 tqp++;
1601 }
1602
1603 return 0;
1604 }
1605
1606 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1607 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1608 {
1609 struct hclge_tqp_map_cmd *req;
1610 struct hclge_desc desc;
1611 int ret;
1612
1613 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1614
1615 req = (struct hclge_tqp_map_cmd *)desc.data;
1616 req->tqp_id = cpu_to_le16(tqp_pid);
1617 req->tqp_vf = func_id;
1618 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1619 if (!is_pf)
1620 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1621 req->tqp_vid = cpu_to_le16(tqp_vid);
1622
1623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1624 if (ret)
1625 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1626
1627 return ret;
1628 }
1629
1630 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1631 {
1632 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1633 struct hclge_dev *hdev = vport->back;
1634 int i, alloced;
1635
1636 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1637 alloced < num_tqps; i++) {
1638 if (!hdev->htqp[i].alloced) {
1639 hdev->htqp[i].q.handle = &vport->nic;
1640 hdev->htqp[i].q.tqp_index = alloced;
1641 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1642 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1643 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1644 hdev->htqp[i].alloced = true;
1645 alloced++;
1646 }
1647 }
1648 vport->alloc_tqps = alloced;
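/* rss_size is bounded by both the hardware maximum and the number of
 * queues actually allocated per TC.
 */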
1649 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1650 vport->alloc_tqps / hdev->tm_info.num_tc);
1651
1652 /* ensure a one-to-one mapping between IRQ and queue by default */
1653 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1654 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1655
1656 return 0;
1657 }
1658
1659 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1660 u16 num_tx_desc, u16 num_rx_desc)
1661
1662 {
1663 struct hnae3_handle *nic = &vport->nic;
1664 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1665 struct hclge_dev *hdev = vport->back;
1666 int ret;
1667
1668 kinfo->num_tx_desc = num_tx_desc;
1669 kinfo->num_rx_desc = num_rx_desc;
1670
1671 kinfo->rx_buf_len = hdev->rx_buf_len;
1672
1673 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1674 sizeof(struct hnae3_queue *), GFP_KERNEL);
1675 if (!kinfo->tqp)
1676 return -ENOMEM;
1677
1678 ret = hclge_assign_tqp(vport, num_tqps);
1679 if (ret)
1680 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1681
1682 return ret;
1683 }
1684
1685 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1686 struct hclge_vport *vport)
1687 {
1688 struct hnae3_handle *nic = &vport->nic;
1689 struct hnae3_knic_private_info *kinfo;
1690 u16 i;
1691
1692 kinfo = &nic->kinfo;
1693 for (i = 0; i < vport->alloc_tqps; i++) {
1694 struct hclge_tqp *q =
1695 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1696 bool is_pf;
1697 int ret;
1698
1699 is_pf = !(vport->vport_id);
1700 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1701 i, is_pf);
1702 if (ret)
1703 return ret;
1704 }
1705
1706 return 0;
1707 }
1708
1709 static int hclge_map_tqp(struct hclge_dev *hdev)
1710 {
1711 struct hclge_vport *vport = hdev->vport;
1712 u16 i, num_vport;
1713
1714 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1715 for (i = 0; i < num_vport; i++) {
1716 int ret;
1717
1718 ret = hclge_map_tqp_to_vport(hdev, vport);
1719 if (ret)
1720 return ret;
1721
1722 vport++;
1723 }
1724
1725 return 0;
1726 }
1727
1728 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1729 {
1730 struct hnae3_handle *nic = &vport->nic;
1731 struct hclge_dev *hdev = vport->back;
1732 int ret;
1733
1734 nic->pdev = hdev->pdev;
1735 nic->ae_algo = &ae_algo;
1736 nic->numa_node_mask = hdev->numa_node_mask;
1737
1738 ret = hclge_knic_setup(vport, num_tqps,
1739 hdev->num_tx_desc, hdev->num_rx_desc);
1740 if (ret)
1741 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1742
1743 return ret;
1744 }
1745
1746 static int hclge_alloc_vport(struct hclge_dev *hdev)
1747 {
1748 struct pci_dev *pdev = hdev->pdev;
1749 struct hclge_vport *vport;
1750 u32 tqp_main_vport;
1751 u32 tqp_per_vport;
1752 int num_vport, i;
1753 int ret;
1754
1755 /* We need to alloc a vport for the main NIC of the PF */
1756 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1757
1758 if (hdev->num_tqps < num_vport) {
1759 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1760 hdev->num_tqps, num_vport);
1761 return -EINVAL;
1762 }
1763
1764 /* Alloc the same number of TQPs for every vport */
1765 tqp_per_vport = hdev->num_tqps / num_vport;
1766 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1767
1768 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1769 GFP_KERNEL);
1770 if (!vport)
1771 return -ENOMEM;
1772
1773 hdev->vport = vport;
1774 hdev->num_alloc_vport = num_vport;
1775
1776 if (IS_ENABLED(CONFIG_PCI_IOV))
1777 hdev->num_alloc_vfs = hdev->num_req_vfs;
1778
1779 for (i = 0; i < num_vport; i++) {
1780 vport->back = hdev;
1781 vport->vport_id = i;
1782 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1783 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1784 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1785 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1786 INIT_LIST_HEAD(&vport->vlan_list);
1787 INIT_LIST_HEAD(&vport->uc_mac_list);
1788 INIT_LIST_HEAD(&vport->mc_mac_list);
1789 spin_lock_init(&vport->mac_list_lock);
1790
1791 if (i == 0)
1792 ret = hclge_vport_setup(vport, tqp_main_vport);
1793 else
1794 ret = hclge_vport_setup(vport, tqp_per_vport);
1795 if (ret) {
1796 dev_err(&pdev->dev,
1797 "vport setup failed for vport %d, %d\n",
1798 i, ret);
1799 return ret;
1800 }
1801
1802 vport++;
1803 }
1804
1805 return 0;
1806 }
1807
1808 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1809 struct hclge_pkt_buf_alloc *buf_alloc)
1810 {
1811 /* TX buffer size is allocated in units of 128 bytes */
1812 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1813 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1814 struct hclge_tx_buff_alloc_cmd *req;
1815 struct hclge_desc desc;
1816 int ret;
1817 u8 i;
1818
1819 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1820
1821 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1822 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1823 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1824
1825 req->tx_pkt_buff[i] =
1826 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1827 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1828 }
1829
1830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1831 if (ret)
1832 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1833 ret);
1834
1835 return ret;
1836 }
1837
1838 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1839 struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1842
1843 if (ret)
1844 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1845
1846 return ret;
1847 }
1848
1849 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1850 {
1851 unsigned int i;
1852 u32 cnt = 0;
1853
1854 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1855 if (hdev->hw_tc_map & BIT(i))
1856 cnt++;
1857 return cnt;
1858 }
1859
1860 /* Get the number of PFC-enabled TCs which have a private buffer */
1861 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1862 struct hclge_pkt_buf_alloc *buf_alloc)
1863 {
1864 struct hclge_priv_buf *priv;
1865 unsigned int i;
1866 int cnt = 0;
1867
1868 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1869 priv = &buf_alloc->priv_buf[i];
1870 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1871 priv->enable)
1872 cnt++;
1873 }
1874
1875 return cnt;
1876 }
1877
1878 /* Get the number of PFC-disabled TCs which have a private buffer */
1879 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1880 struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882 struct hclge_priv_buf *priv;
1883 unsigned int i;
1884 int cnt = 0;
1885
1886 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1887 priv = &buf_alloc->priv_buf[i];
1888 if (hdev->hw_tc_map & BIT(i) &&
1889 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1890 priv->enable)
1891 cnt++;
1892 }
1893
1894 return cnt;
1895 }
1896
1897 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1898 {
1899 struct hclge_priv_buf *priv;
1900 u32 rx_priv = 0;
1901 int i;
1902
1903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1904 priv = &buf_alloc->priv_buf[i];
1905 if (priv->enable)
1906 rx_priv += priv->buf_size;
1907 }
1908 return rx_priv;
1909 }
1910
1911 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913 u32 i, total_tx_size = 0;
1914
1915 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1916 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1917
1918 return total_tx_size;
1919 }
1920
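/* Check whether the rx buffer left after private allocation (rx_all) can
 * hold the required shared buffer; if so, fill in the shared buffer size
 * and its high/low waterlines and per-TC thresholds.
 */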
1921 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1922 struct hclge_pkt_buf_alloc *buf_alloc,
1923 u32 rx_all)
1924 {
1925 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1926 u32 tc_num = hclge_get_tc_num(hdev);
1927 u32 shared_buf, aligned_mps;
1928 u32 rx_priv;
1929 int i;
1930
1931 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1932
1933 if (hnae3_dev_dcb_supported(hdev))
1934 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1935 hdev->dv_buf_size;
1936 else
1937 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1938 + hdev->dv_buf_size;
1939
1940 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1941 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1942 HCLGE_BUF_SIZE_UNIT);
1943
1944 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1945 if (rx_all < rx_priv + shared_std)
1946 return false;
1947
1948 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1949 buf_alloc->s_buf.buf_size = shared_buf;
1950 if (hnae3_dev_dcb_supported(hdev)) {
1951 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1952 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1953 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1954 HCLGE_BUF_SIZE_UNIT);
1955 } else {
1956 buf_alloc->s_buf.self.high = aligned_mps +
1957 HCLGE_NON_DCB_ADDITIONAL_BUF;
1958 buf_alloc->s_buf.self.low = aligned_mps;
1959 }
1960
1961 if (hnae3_dev_dcb_supported(hdev)) {
1962 hi_thrd = shared_buf - hdev->dv_buf_size;
1963
1964 if (tc_num <= NEED_RESERVE_TC_NUM)
1965 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1966 / BUF_MAX_PERCENT;
1967
1968 if (tc_num)
1969 hi_thrd = hi_thrd / tc_num;
1970
1971 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1972 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1973 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1974 } else {
1975 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1976 lo_thrd = aligned_mps;
1977 }
1978
1979 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1980 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1981 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1982 }
1983
1984 return true;
1985 }
1986
1987 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1988 struct hclge_pkt_buf_alloc *buf_alloc)
1989 {
1990 u32 i, total_size;
1991
1992 total_size = hdev->pkt_buf_size;
1993
1994 /* alloc tx buffer for all enabled tc */
1995 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1996 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1997
1998 if (hdev->hw_tc_map & BIT(i)) {
1999 if (total_size < hdev->tx_buf_size)
2000 return -ENOMEM;
2001
2002 priv->tx_buf_size = hdev->tx_buf_size;
2003 } else {
2004 priv->tx_buf_size = 0;
2005 }
2006
2007 total_size -= priv->tx_buf_size;
2008 }
2009
2010 return 0;
2011 }
2012
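/* Calculate the private buffer size and waterlines for every enabled TC,
 * using larger waterlines when @max is true, then check whether the
 * remaining packet buffer can still hold the shared buffer.
 */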
2013 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2014 struct hclge_pkt_buf_alloc *buf_alloc)
2015 {
2016 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2018 unsigned int i;
2019
2020 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2021 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2022
2023 priv->enable = 0;
2024 priv->wl.low = 0;
2025 priv->wl.high = 0;
2026 priv->buf_size = 0;
2027
2028 if (!(hdev->hw_tc_map & BIT(i)))
2029 continue;
2030
2031 priv->enable = 1;
2032
2033 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2034 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2035 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2036 HCLGE_BUF_SIZE_UNIT);
2037 } else {
2038 priv->wl.low = 0;
2039 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2040 aligned_mps;
2041 }
2042
2043 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2044 }
2045
2046 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2047 }
2048
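/* Release the private buffers of TCs without PFC, starting from the last
 * TC, until the rx buffer layout fits or no such TC is left.
 */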
2049 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2050 struct hclge_pkt_buf_alloc *buf_alloc)
2051 {
2052 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2053 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2054 int i;
2055
2056 /* let the last one be cleared first */
2057 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2058 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2059 unsigned int mask = BIT((unsigned int)i);
2060
2061 if (hdev->hw_tc_map & mask &&
2062 !(hdev->tm_info.hw_pfc_map & mask)) {
2063 /* Clear the private buffer of TCs without PFC */
2064 priv->wl.low = 0;
2065 priv->wl.high = 0;
2066 priv->buf_size = 0;
2067 priv->enable = 0;
2068 no_pfc_priv_num--;
2069 }
2070
2071 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2072 no_pfc_priv_num == 0)
2073 break;
2074 }
2075
2076 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078
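/* Release the private buffers of PFC-enabled TCs, starting from the last
 * TC, until the rx buffer layout fits or no such TC is left.
 */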
2079 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2084 int i;
2085
2086 /* let the last one be cleared first */
2087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 unsigned int mask = BIT((unsigned int)i);
2090
2091 if (hdev->hw_tc_map & mask &&
2092 hdev->tm_info.hw_pfc_map & mask) {
2093 /* Reduce the number of PFC-enabled TCs with a private buffer */
2094 priv->wl.low = 0;
2095 priv->enable = 0;
2096 priv->wl.high = 0;
2097 priv->buf_size = 0;
2098 pfc_priv_num--;
2099 }
2100
2101 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 pfc_priv_num == 0)
2103 break;
2104 }
2105
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108
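/* Try to split the whole rx packet buffer into per-TC private buffers with
 * no shared buffer; return true only if each enabled TC still gets at
 * least the minimum private buffer size.
 */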
2109 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 #define COMPENSATE_BUFFER 0x3C00
2113 #define COMPENSATE_HALF_MPS_NUM 5
2114 #define PRIV_WL_GAP 0x1800
2115
2116 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2117 u32 tc_num = hclge_get_tc_num(hdev);
2118 u32 half_mps = hdev->mps >> 1;
2119 u32 min_rx_priv;
2120 unsigned int i;
2121
2122 if (tc_num)
2123 rx_priv = rx_priv / tc_num;
2124
2125 if (tc_num <= NEED_RESERVE_TC_NUM)
2126 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2127
2128 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2129 COMPENSATE_HALF_MPS_NUM * half_mps;
2130 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2131 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2132
2133 if (rx_priv < min_rx_priv)
2134 return false;
2135
2136 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2137 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2138
2139 priv->enable = 0;
2140 priv->wl.low = 0;
2141 priv->wl.high = 0;
2142 priv->buf_size = 0;
2143
2144 if (!(hdev->hw_tc_map & BIT(i)))
2145 continue;
2146
2147 priv->enable = 1;
2148 priv->buf_size = rx_priv;
2149 priv->wl.high = rx_priv - hdev->dv_buf_size;
2150 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2151 }
2152
2153 buf_alloc->s_buf.buf_size = 0;
2154
2155 return true;
2156 }
2157
2158 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2159 * @hdev: pointer to struct hclge_dev
2160 * @buf_alloc: pointer to buffer calculation data
2161 * @return: 0: calculation successful, negative: failure
2162 */
2163 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2164 struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 /* When DCB is not supported, rx private buffer is not allocated. */
2167 if (!hnae3_dev_dcb_supported(hdev)) {
2168 u32 rx_all = hdev->pkt_buf_size;
2169
2170 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2171 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2172 return -ENOMEM;
2173
2174 return 0;
2175 }
2176
2177 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2178 return 0;
2179
2180 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2181 return 0;
2182
2183 /* try to decrease the buffer size */
2184 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2185 return 0;
2186
2187 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2188 return 0;
2189
2190 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2191 return 0;
2192
2193 return -ENOMEM;
2194 }
2195
2196 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2197 struct hclge_pkt_buf_alloc *buf_alloc)
2198 {
2199 struct hclge_rx_priv_buff_cmd *req;
2200 struct hclge_desc desc;
2201 int ret;
2202 int i;
2203
2204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2205 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2206
2207 /* Alloc private buffer TCs */
2208 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2209 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2210
2211 req->buf_num[i] =
2212 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2213 req->buf_num[i] |=
2214 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2215 }
2216
2217 req->shared_buf =
2218 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2219 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2220
2221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222 if (ret)
2223 dev_err(&hdev->pdev->dev,
2224 "rx private buffer alloc cmd failed %d\n", ret);
2225
2226 return ret;
2227 }
2228
2229 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2230 struct hclge_pkt_buf_alloc *buf_alloc)
2231 {
2232 struct hclge_rx_priv_wl_buf *req;
2233 struct hclge_priv_buf *priv;
2234 struct hclge_desc desc[2];
2235 int i, j;
2236 int ret;
2237
2238 for (i = 0; i < 2; i++) {
2239 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2240 false);
2241 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2242
2243 /* The first descriptor sets the NEXT bit to 1 */
2244 if (i == 0)
2245 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2246 else
2247 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2248
2249 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2250 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2251
2252 priv = &buf_alloc->priv_buf[idx];
2253 req->tc_wl[j].high =
2254 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2255 req->tc_wl[j].high |=
2256 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2257 req->tc_wl[j].low =
2258 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2259 req->tc_wl[j].low |=
2260 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2261 }
2262 }
2263
2264 /* Send 2 descriptors at one time */
2265 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2266 if (ret)
2267 dev_err(&hdev->pdev->dev,
2268 "rx private waterline config cmd failed %d\n",
2269 ret);
2270 return ret;
2271 }
2272
2273 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2274 struct hclge_pkt_buf_alloc *buf_alloc)
2275 {
2276 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2277 struct hclge_rx_com_thrd *req;
2278 struct hclge_desc desc[2];
2279 struct hclge_tc_thrd *tc;
2280 int i, j;
2281 int ret;
2282
2283 for (i = 0; i < 2; i++) {
2284 hclge_cmd_setup_basic_desc(&desc[i],
2285 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2286 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2287
2288 /* The first descriptor sets the NEXT bit to 1 */
2289 if (i == 0)
2290 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2291 else
2292 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2293
2294 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2295 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2296
2297 req->com_thrd[j].high =
2298 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2299 req->com_thrd[j].high |=
2300 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2301 req->com_thrd[j].low =
2302 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2303 req->com_thrd[j].low |=
2304 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2305 }
2306 }
2307
2308 /* Send 2 descriptors at one time */
2309 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2310 if (ret)
2311 dev_err(&hdev->pdev->dev,
2312 "common threshold config cmd failed %d\n", ret);
2313 return ret;
2314 }
2315
2316 static int hclge_common_wl_config(struct hclge_dev *hdev,
2317 struct hclge_pkt_buf_alloc *buf_alloc)
2318 {
2319 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2320 struct hclge_rx_com_wl *req;
2321 struct hclge_desc desc;
2322 int ret;
2323
2324 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2325
2326 req = (struct hclge_rx_com_wl *)desc.data;
2327 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2328 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2329
2330 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2331 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2332
2333 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2334 if (ret)
2335 dev_err(&hdev->pdev->dev,
2336 "common waterline config cmd failed %d\n", ret);
2337
2338 return ret;
2339 }
2340
2341 int hclge_buffer_alloc(struct hclge_dev *hdev)
2342 {
2343 struct hclge_pkt_buf_alloc *pkt_buf;
2344 int ret;
2345
2346 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2347 if (!pkt_buf)
2348 return -ENOMEM;
2349
2350 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2351 if (ret) {
2352 dev_err(&hdev->pdev->dev,
2353 "could not calc tx buffer size for all TCs %d\n", ret);
2354 goto out;
2355 }
2356
2357 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2358 if (ret) {
2359 dev_err(&hdev->pdev->dev,
2360 "could not alloc tx buffers %d\n", ret);
2361 goto out;
2362 }
2363
2364 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2365 if (ret) {
2366 dev_err(&hdev->pdev->dev,
2367 "could not calc rx priv buffer size for all TCs %d\n",
2368 ret);
2369 goto out;
2370 }
2371
2372 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2373 if (ret) {
2374 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2375 ret);
2376 goto out;
2377 }
2378
2379 if (hnae3_dev_dcb_supported(hdev)) {
2380 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2381 if (ret) {
2382 dev_err(&hdev->pdev->dev,
2383 "could not configure rx private waterline %d\n",
2384 ret);
2385 goto out;
2386 }
2387
2388 ret = hclge_common_thrd_config(hdev, pkt_buf);
2389 if (ret) {
2390 dev_err(&hdev->pdev->dev,
2391 "could not configure common threshold %d\n",
2392 ret);
2393 goto out;
2394 }
2395 }
2396
2397 ret = hclge_common_wl_config(hdev, pkt_buf);
2398 if (ret)
2399 dev_err(&hdev->pdev->dev,
2400 "could not configure common waterline %d\n", ret);
2401
2402 out:
2403 kfree(pkt_buf);
2404 return ret;
2405 }
2406
2407 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2408 {
2409 struct hnae3_handle *roce = &vport->roce;
2410 struct hnae3_handle *nic = &vport->nic;
2411
2412 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2413
2414 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2415 vport->back->num_msi_left == 0)
2416 return -EINVAL;
2417
2418 roce->rinfo.base_vector = vport->back->roce_base_vector;
2419
2420 roce->rinfo.netdev = nic->kinfo.netdev;
2421 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2422
2423 roce->pdev = nic->pdev;
2424 roce->ae_algo = nic->ae_algo;
2425 roce->numa_node_mask = nic->numa_node_mask;
2426
2427 return 0;
2428 }
2429
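/* Allocate MSI/MSI-X vectors for the PF and set up the vector bookkeeping
 * arrays (vector_status/vector_irq).
 */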
2430 static int hclge_init_msi(struct hclge_dev *hdev)
2431 {
2432 struct pci_dev *pdev = hdev->pdev;
2433 int vectors;
2434 int i;
2435
2436 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2437 hdev->num_msi,
2438 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2439 if (vectors < 0) {
2440 dev_err(&pdev->dev,
2441 "failed(%d) to allocate MSI/MSI-X vectors\n",
2442 vectors);
2443 return vectors;
2444 }
2445 if (vectors < hdev->num_msi)
2446 dev_warn(&hdev->pdev->dev,
2447 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2448 hdev->num_msi, vectors);
2449
2450 hdev->num_msi = vectors;
2451 hdev->num_msi_left = vectors;
2452
2453 hdev->base_msi_vector = pdev->irq;
2454 hdev->roce_base_vector = hdev->base_msi_vector +
2455 hdev->roce_base_msix_offset;
2456
2457 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2458 sizeof(u16), GFP_KERNEL);
2459 if (!hdev->vector_status) {
2460 pci_free_irq_vectors(pdev);
2461 return -ENOMEM;
2462 }
2463
2464 for (i = 0; i < hdev->num_msi; i++)
2465 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2466
2467 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2468 sizeof(int), GFP_KERNEL);
2469 if (!hdev->vector_irq) {
2470 pci_free_irq_vectors(pdev);
2471 return -ENOMEM;
2472 }
2473
2474 return 0;
2475 }
2476
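/* Force full duplex for any speed other than 10M and 100M. */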
2477 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2478 {
2479 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2480 duplex = HCLGE_MAC_FULL;
2481
2482 return duplex;
2483 }
2484
2485 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2486 u8 duplex)
2487 {
2488 struct hclge_config_mac_speed_dup_cmd *req;
2489 struct hclge_desc desc;
2490 int ret;
2491
2492 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2493
2494 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2495
2496 if (duplex)
2497 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2498
2499 switch (speed) {
2500 case HCLGE_MAC_SPEED_10M:
2501 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2502 HCLGE_CFG_SPEED_S, 6);
2503 break;
2504 case HCLGE_MAC_SPEED_100M:
2505 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2506 HCLGE_CFG_SPEED_S, 7);
2507 break;
2508 case HCLGE_MAC_SPEED_1G:
2509 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2510 HCLGE_CFG_SPEED_S, 0);
2511 break;
2512 case HCLGE_MAC_SPEED_10G:
2513 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2514 HCLGE_CFG_SPEED_S, 1);
2515 break;
2516 case HCLGE_MAC_SPEED_25G:
2517 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2518 HCLGE_CFG_SPEED_S, 2);
2519 break;
2520 case HCLGE_MAC_SPEED_40G:
2521 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2522 HCLGE_CFG_SPEED_S, 3);
2523 break;
2524 case HCLGE_MAC_SPEED_50G:
2525 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526 HCLGE_CFG_SPEED_S, 4);
2527 break;
2528 case HCLGE_MAC_SPEED_100G:
2529 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530 HCLGE_CFG_SPEED_S, 5);
2531 break;
2532 case HCLGE_MAC_SPEED_200G:
2533 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534 HCLGE_CFG_SPEED_S, 8);
2535 break;
2536 default:
2537 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2538 return -EINVAL;
2539 }
2540
2541 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2542 1);
2543
2544 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2545 if (ret) {
2546 dev_err(&hdev->pdev->dev,
2547 "mac speed/duplex config cmd failed %d.\n", ret);
2548 return ret;
2549 }
2550
2551 return 0;
2552 }
2553
2554 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2555 {
2556 struct hclge_mac *mac = &hdev->hw.mac;
2557 int ret;
2558
2559 duplex = hclge_check_speed_dup(duplex, speed);
2560 if (!mac->support_autoneg && mac->speed == speed &&
2561 mac->duplex == duplex)
2562 return 0;
2563
2564 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2565 if (ret)
2566 return ret;
2567
2568 hdev->hw.mac.speed = speed;
2569 hdev->hw.mac.duplex = duplex;
2570
2571 return 0;
2572 }
2573
2574 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2575 u8 duplex)
2576 {
2577 struct hclge_vport *vport = hclge_get_vport(handle);
2578 struct hclge_dev *hdev = vport->back;
2579
2580 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2581 }
2582
2583 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2584 {
2585 struct hclge_config_auto_neg_cmd *req;
2586 struct hclge_desc desc;
2587 u32 flag = 0;
2588 int ret;
2589
2590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2591
2592 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2593 if (enable)
2594 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2595 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2596
2597 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2598 if (ret)
2599 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2600 ret);
2601
2602 return ret;
2603 }
2604
2605 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2606 {
2607 struct hclge_vport *vport = hclge_get_vport(handle);
2608 struct hclge_dev *hdev = vport->back;
2609
2610 if (!hdev->hw.mac.support_autoneg) {
2611 if (enable) {
2612 dev_err(&hdev->pdev->dev,
2613 "autoneg is not supported by current port\n");
2614 return -EOPNOTSUPP;
2615 } else {
2616 return 0;
2617 }
2618 }
2619
2620 return hclge_set_autoneg_en(hdev, enable);
2621 }
2622
2623 static int hclge_get_autoneg(struct hnae3_handle *handle)
2624 {
2625 struct hclge_vport *vport = hclge_get_vport(handle);
2626 struct hclge_dev *hdev = vport->back;
2627 struct phy_device *phydev = hdev->hw.mac.phydev;
2628
2629 if (phydev)
2630 return phydev->autoneg;
2631
2632 return hdev->hw.mac.autoneg;
2633 }
2634
2635 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2636 {
2637 struct hclge_vport *vport = hclge_get_vport(handle);
2638 struct hclge_dev *hdev = vport->back;
2639 int ret;
2640
2641 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2642
2643 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2644 if (ret)
2645 return ret;
2646 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2647 }
2648
2649 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2650 {
2651 struct hclge_vport *vport = hclge_get_vport(handle);
2652 struct hclge_dev *hdev = vport->back;
2653
2654 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2655 return hclge_set_autoneg_en(hdev, !halt);
2656
2657 return 0;
2658 }
2659
2660 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2661 {
2662 struct hclge_config_fec_cmd *req;
2663 struct hclge_desc desc;
2664 int ret;
2665
2666 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2667
2668 req = (struct hclge_config_fec_cmd *)desc.data;
2669 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2670 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2671 if (fec_mode & BIT(HNAE3_FEC_RS))
2672 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2673 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2674 if (fec_mode & BIT(HNAE3_FEC_BASER))
2675 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2676 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2677
2678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2679 if (ret)
2680 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2681
2682 return ret;
2683 }
2684
2685 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2686 {
2687 struct hclge_vport *vport = hclge_get_vport(handle);
2688 struct hclge_dev *hdev = vport->back;
2689 struct hclge_mac *mac = &hdev->hw.mac;
2690 int ret;
2691
2692 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2693 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2694 return -EINVAL;
2695 }
2696
2697 ret = hclge_set_fec_hw(hdev, fec_mode);
2698 if (ret)
2699 return ret;
2700
2701 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2702 return 0;
2703 }
2704
2705 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2706 u8 *fec_mode)
2707 {
2708 struct hclge_vport *vport = hclge_get_vport(handle);
2709 struct hclge_dev *hdev = vport->back;
2710 struct hclge_mac *mac = &hdev->hw.mac;
2711
2712 if (fec_ability)
2713 *fec_ability = mac->fec_ability;
2714 if (fec_mode)
2715 *fec_mode = mac->fec_mode;
2716 }
2717
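/* Initialize the MAC: configure speed/duplex, autoneg, FEC, MTU and the
 * default loopback, then (re)allocate the packet buffer.
 */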
2718 static int hclge_mac_init(struct hclge_dev *hdev)
2719 {
2720 struct hclge_mac *mac = &hdev->hw.mac;
2721 int ret;
2722
2723 hdev->support_sfp_query = true;
2724 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2725 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2726 hdev->hw.mac.duplex);
2727 if (ret)
2728 return ret;
2729
2730 if (hdev->hw.mac.support_autoneg) {
2731 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2732 if (ret)
2733 return ret;
2734 }
2735
2736 mac->link = 0;
2737
2738 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2739 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2740 if (ret)
2741 return ret;
2742 }
2743
2744 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2745 if (ret) {
2746 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2747 return ret;
2748 }
2749
2750 ret = hclge_set_default_loopback(hdev);
2751 if (ret)
2752 return ret;
2753
2754 ret = hclge_buffer_alloc(hdev);
2755 if (ret)
2756 dev_err(&hdev->pdev->dev,
2757 "allocate buffer fail, ret=%d\n", ret);
2758
2759 return ret;
2760 }
2761
2762 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2763 {
2764 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2765 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2766 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2767 hclge_wq, &hdev->service_task, 0);
2768 }
2769
2770 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2771 {
2772 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2773 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2774 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2775 hclge_wq, &hdev->service_task, 0);
2776 }
2777
2778 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2779 {
2780 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2781 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2782 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2783 hclge_wq, &hdev->service_task,
2784 delay_time);
2785 }
2786
2787 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2788 {
2789 struct hclge_link_status_cmd *req;
2790 struct hclge_desc desc;
2791 int ret;
2792
2793 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2795 if (ret) {
2796 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2797 ret);
2798 return ret;
2799 }
2800
2801 req = (struct hclge_link_status_cmd *)desc.data;
2802 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2803 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2804
2805 return 0;
2806 }
2807
2808 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2809 {
2810 struct phy_device *phydev = hdev->hw.mac.phydev;
2811
2812 *link_status = HCLGE_LINK_STATUS_DOWN;
2813
2814 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2815 return 0;
2816
2817 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2818 return 0;
2819
2820 return hclge_get_mac_link_status(hdev, link_status);
2821 }
2822
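/* Query the current MAC/PHY link state and, on change, notify the NIC and
 * RoCE clients of every vport and update the MAC tunnel interrupt config.
 */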
2823 static void hclge_update_link_status(struct hclge_dev *hdev)
2824 {
2825 struct hnae3_client *rclient = hdev->roce_client;
2826 struct hnae3_client *client = hdev->nic_client;
2827 struct hnae3_handle *rhandle;
2828 struct hnae3_handle *handle;
2829 int state;
2830 int ret;
2831 int i;
2832
2833 if (!client)
2834 return;
2835
2836 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2837 return;
2838
2839 ret = hclge_get_mac_phy_link(hdev, &state);
2840 if (ret) {
2841 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2842 return;
2843 }
2844
2845 if (state != hdev->hw.mac.link) {
2846 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2847 handle = &hdev->vport[i].nic;
2848 client->ops->link_status_change(handle, state);
2849 hclge_config_mac_tnl_int(hdev, state);
2850 rhandle = &hdev->vport[i].roce;
2851 if (rclient && rclient->ops->link_status_change)
2852 rclient->ops->link_status_change(rhandle,
2853 state);
2854 }
2855 hdev->hw.mac.link = state;
2856 }
2857
2858 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2859 }
2860
2861 static void hclge_update_port_capability(struct hclge_mac *mac)
2862 {
2863 /* update fec ability by speed */
2864 hclge_convert_setting_fec(mac);
2865
2866 /* firmware cannot identify the backplane type; the media type
2867 * read from the configuration can help handle it
2868 */
2869 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2870 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2871 mac->module_type = HNAE3_MODULE_TYPE_KR;
2872 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873 mac->module_type = HNAE3_MODULE_TYPE_TP;
2874
2875 if (mac->support_autoneg) {
2876 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2877 linkmode_copy(mac->advertising, mac->supported);
2878 } else {
2879 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2880 mac->supported);
2881 linkmode_zero(mac->advertising);
2882 }
2883 }
2884
2885 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2886 {
2887 struct hclge_sfp_info_cmd *resp;
2888 struct hclge_desc desc;
2889 int ret;
2890
2891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2892 resp = (struct hclge_sfp_info_cmd *)desc.data;
2893 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2894 if (ret == -EOPNOTSUPP) {
2895 dev_warn(&hdev->pdev->dev,
2896 "IMP do not support get SFP speed %d\n", ret);
2897 return ret;
2898 } else if (ret) {
2899 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2900 return ret;
2901 }
2902
2903 *speed = le32_to_cpu(resp->speed);
2904
2905 return 0;
2906 }
2907
2908 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2909 {
2910 struct hclge_sfp_info_cmd *resp;
2911 struct hclge_desc desc;
2912 int ret;
2913
2914 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2915 resp = (struct hclge_sfp_info_cmd *)desc.data;
2916
2917 resp->query_type = QUERY_ACTIVE_SPEED;
2918
2919 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2920 if (ret == -EOPNOTSUPP) {
2921 dev_warn(&hdev->pdev->dev,
2922 "IMP does not support get SFP info %d\n", ret);
2923 return ret;
2924 } else if (ret) {
2925 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2926 return ret;
2927 }
2928
2929 /* In some cases, the MAC speed queried from the IMP may be 0; it
2930 * shouldn't be assigned to mac->speed.
2931 */
2932 if (!le32_to_cpu(resp->speed))
2933 return 0;
2934
2935 mac->speed = le32_to_cpu(resp->speed);
2936 /* if resp->speed_ability is 0, it means the firmware is an old
2937 * version; do not update these params
2938 */
2939 if (resp->speed_ability) {
2940 mac->module_type = le32_to_cpu(resp->module_type);
2941 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2942 mac->autoneg = resp->autoneg;
2943 mac->support_autoneg = resp->autoneg_ability;
2944 mac->speed_type = QUERY_ACTIVE_SPEED;
2945 if (!resp->active_fec)
2946 mac->fec_mode = 0;
2947 else
2948 mac->fec_mode = BIT(resp->active_fec);
2949 } else {
2950 mac->speed_type = QUERY_SFP_SPEED;
2951 }
2952
2953 return 0;
2954 }
2955
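/* Refresh port speed and capability from the SFP/firmware for non-copper
 * ports and reconfigure the MAC speed when needed.
 */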
2956 static int hclge_update_port_info(struct hclge_dev *hdev)
2957 {
2958 struct hclge_mac *mac = &hdev->hw.mac;
2959 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2960 int ret;
2961
2962 /* get the port info from SFP cmd if not copper port */
2963 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2964 return 0;
2965
2966 /* if IMP does not support getting SFP/qSFP info, return directly */
2967 if (!hdev->support_sfp_query)
2968 return 0;
2969
2970 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2971 ret = hclge_get_sfp_info(hdev, mac);
2972 else
2973 ret = hclge_get_sfp_speed(hdev, &speed);
2974
2975 if (ret == -EOPNOTSUPP) {
2976 hdev->support_sfp_query = false;
2977 return ret;
2978 } else if (ret) {
2979 return ret;
2980 }
2981
2982 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2983 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2984 hclge_update_port_capability(mac);
2985 return 0;
2986 }
2987 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2988 HCLGE_MAC_FULL);
2989 } else {
2990 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2991 return 0; /* do nothing if no SFP */
2992
2993 /* must config full duplex for SFP */
2994 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2995 }
2996 }
2997
2998 static int hclge_get_status(struct hnae3_handle *handle)
2999 {
3000 struct hclge_vport *vport = hclge_get_vport(handle);
3001 struct hclge_dev *hdev = vport->back;
3002
3003 hclge_update_link_status(hdev);
3004
3005 return hdev->hw.mac.link;
3006 }
3007
3008 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3009 {
3010 if (!pci_num_vf(hdev->pdev)) {
3011 dev_err(&hdev->pdev->dev,
3012 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3013 return NULL;
3014 }
3015
3016 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3017 dev_err(&hdev->pdev->dev,
3018 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3019 vf, pci_num_vf(hdev->pdev));
3020 return NULL;
3021 }
3022
3023 /* VFs start from 1 in vport */
3024 vf += HCLGE_VF_VPORT_START_NUM;
3025 return &hdev->vport[vf];
3026 }
3027
3028 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3029 struct ifla_vf_info *ivf)
3030 {
3031 struct hclge_vport *vport = hclge_get_vport(handle);
3032 struct hclge_dev *hdev = vport->back;
3033
3034 vport = hclge_get_vf_vport(hdev, vf);
3035 if (!vport)
3036 return -EINVAL;
3037
3038 ivf->vf = vf;
3039 ivf->linkstate = vport->vf_info.link_state;
3040 ivf->spoofchk = vport->vf_info.spoofchk;
3041 ivf->trusted = vport->vf_info.trusted;
3042 ivf->min_tx_rate = 0;
3043 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3044 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3045 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3046 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3047 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3048
3049 return 0;
3050 }
3051
3052 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3053 int link_state)
3054 {
3055 struct hclge_vport *vport = hclge_get_vport(handle);
3056 struct hclge_dev *hdev = vport->back;
3057
3058 vport = hclge_get_vf_vport(hdev, vf);
3059 if (!vport)
3060 return -EINVAL;
3061
3062 vport->vf_info.link_state = link_state;
3063
3064 return 0;
3065 }
3066
3067 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3068 {
3069 u32 cmdq_src_reg, msix_src_reg;
3070
3071 /* fetch the events from their corresponding regs */
3072 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3073 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3074
3075 /* Assumption: if by any chance reset and mailbox events are reported
3076 * together, then we will only process the reset event in this pass and
3077 * will defer the processing of the mailbox events. Since we would not
3078 * have cleared the RX CMDQ event this time, we would receive another
3079 * interrupt from H/W just for the mailbox.
3080 *
3081 * check for vector0 reset event sources
3082 */
3083 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3084 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3085 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3086 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3087 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3088 hdev->rst_stats.imp_rst_cnt++;
3089 return HCLGE_VECTOR0_EVENT_RST;
3090 }
3091
3092 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3093 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3094 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3095 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3096 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3097 hdev->rst_stats.global_rst_cnt++;
3098 return HCLGE_VECTOR0_EVENT_RST;
3099 }
3100
3101 /* check for vector0 msix event source */
3102 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3103 *clearval = msix_src_reg;
3104 return HCLGE_VECTOR0_EVENT_ERR;
3105 }
3106
3107 /* check for vector0 mailbox(=CMDQ RX) event source */
3108 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3109 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3110 *clearval = cmdq_src_reg;
3111 return HCLGE_VECTOR0_EVENT_MBX;
3112 }
3113
3114 /* print other vector0 event source */
3115 dev_info(&hdev->pdev->dev,
3116 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3117 cmdq_src_reg, msix_src_reg);
3118 *clearval = msix_src_reg;
3119
3120 return HCLGE_VECTOR0_EVENT_OTHER;
3121 }
3122
3123 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3124 u32 regclr)
3125 {
3126 switch (event_type) {
3127 case HCLGE_VECTOR0_EVENT_RST:
3128 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3129 break;
3130 case HCLGE_VECTOR0_EVENT_MBX:
3131 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3132 break;
3133 default:
3134 break;
3135 }
3136 }
3137
3138 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3139 {
3140 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3141 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3142 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3143 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3144 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3145 }
3146
3147 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3148 {
3149 writel(enable ? 1 : 0, vector->addr);
3150 }
3151
3152 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3153 {
3154 struct hclge_dev *hdev = data;
3155 u32 clearval = 0;
3156 u32 event_cause;
3157
3158 hclge_enable_vector(&hdev->misc_vector, false);
3159 event_cause = hclge_check_event_cause(hdev, &clearval);
3160
3161 /* vector 0 interrupt is shared with reset and mailbox source events. */
3162 switch (event_cause) {
3163 case HCLGE_VECTOR0_EVENT_ERR:
3164 /* we do not know what type of reset is required now. This could
3165 * only be decided after we fetch the type of errors which
3166 * caused this event. Therefore, we will do below for now:
3167 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3168 * have deferred the type of reset to be used.
3169 * 2. Schedule the reset service task.
3170 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3171 * will fetch the correct type of reset. This would be done
3172 * by first decoding the types of errors.
3173 */
3174 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3175 fallthrough;
3176 case HCLGE_VECTOR0_EVENT_RST:
3177 hclge_reset_task_schedule(hdev);
3178 break;
3179 case HCLGE_VECTOR0_EVENT_MBX:
3180 /* If we are here, then either:
3181 * 1. we are not handling any mbx task and we are not scheduled
3182 * as well,
3183 * OR
3184 * 2. we could be handling an mbx task but nothing more is
3185 * scheduled.
3186 * In both cases, we should schedule the mbx task as there are more
3187 * mbx messages reported by this interrupt.
3188 */
3189 hclge_mbx_task_schedule(hdev);
3190 break;
3191 default:
3192 dev_warn(&hdev->pdev->dev,
3193 "received unknown or unhandled event of vector0\n");
3194 break;
3195 }
3196
3197 hclge_clear_event_cause(hdev, event_cause, clearval);
3198
3199 /* Enable the interrupt if it is not caused by reset. And when
3200 * clearval equals 0, it means the interrupt status may have been
3201 * cleared by hardware before the driver read the status register.
3202 * In this case, the vector0 interrupt should also be enabled.
3203 */
3204 if (!clearval ||
3205 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3206 hclge_enable_vector(&hdev->misc_vector, true);
3207 }
3208
3209 return IRQ_HANDLED;
3210 }
3211
3212 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3213 {
3214 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3215 dev_warn(&hdev->pdev->dev,
3216 "vector(vector_id %d) has been freed.\n", vector_id);
3217 return;
3218 }
3219
3220 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3221 hdev->num_msi_left += 1;
3222 hdev->num_msi_used -= 1;
3223 }
3224
3225 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3226 {
3227 struct hclge_misc_vector *vector = &hdev->misc_vector;
3228
3229 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3230
3231 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3232 hdev->vector_status[0] = 0;
3233
3234 hdev->num_msi_left -= 1;
3235 hdev->num_msi_used += 1;
3236 }
3237
3238 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3239 const cpumask_t *mask)
3240 {
3241 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3242 affinity_notify);
3243
3244 cpumask_copy(&hdev->affinity_mask, mask);
3245 }
3246
3247 static void hclge_irq_affinity_release(struct kref *ref)
3248 {
3249 }
3250
3251 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3252 {
3253 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3254 &hdev->affinity_mask);
3255
3256 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3257 hdev->affinity_notify.release = hclge_irq_affinity_release;
3258 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3259 &hdev->affinity_notify);
3260 }
3261
3262 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3263 {
3264 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3265 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3266 }
3267
3268 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3269 {
3270 int ret;
3271
3272 hclge_get_misc_vector(hdev);
3273
3274 /* this would be explicitly freed in the end */
3275 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3276 HCLGE_NAME, pci_name(hdev->pdev));
3277 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3278 0, hdev->misc_vector.name, hdev);
3279 if (ret) {
3280 hclge_free_vector(hdev, 0);
3281 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3282 hdev->misc_vector.vector_irq);
3283 }
3284
3285 return ret;
3286 }
3287
3288 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3289 {
3290 free_irq(hdev->misc_vector.vector_irq, hdev);
3291 hclge_free_vector(hdev, 0);
3292 }
3293
3294 int hclge_notify_client(struct hclge_dev *hdev,
3295 enum hnae3_reset_notify_type type)
3296 {
3297 struct hnae3_client *client = hdev->nic_client;
3298 u16 i;
3299
3300 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3301 return 0;
3302
3303 if (!client->ops->reset_notify)
3304 return -EOPNOTSUPP;
3305
3306 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3307 struct hnae3_handle *handle = &hdev->vport[i].nic;
3308 int ret;
3309
3310 ret = client->ops->reset_notify(handle, type);
3311 if (ret) {
3312 dev_err(&hdev->pdev->dev,
3313 "notify nic client failed %d(%d)\n", type, ret);
3314 return ret;
3315 }
3316 }
3317
3318 return 0;
3319 }
3320
3321 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3322 enum hnae3_reset_notify_type type)
3323 {
3324 struct hnae3_client *client = hdev->roce_client;
3325 int ret;
3326 u16 i;
3327
3328 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3329 return 0;
3330
3331 if (!client->ops->reset_notify)
3332 return -EOPNOTSUPP;
3333
3334 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3335 struct hnae3_handle *handle = &hdev->vport[i].roce;
3336
3337 ret = client->ops->reset_notify(handle, type);
3338 if (ret) {
3339 dev_err(&hdev->pdev->dev,
3340 "notify roce client failed %d(%d)",
3341 type, ret);
3342 return ret;
3343 }
3344 }
3345
3346 return ret;
3347 }
3348
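/* Poll the reset status register of the current reset type until the
 * hardware clears the reset bit or the wait times out.
 */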
3349 static int hclge_reset_wait(struct hclge_dev *hdev)
3350 {
3351 #define HCLGE_RESET_WAIT_MS 100
3352 #define HCLGE_RESET_WAIT_CNT 350
3353
3354 u32 val, reg, reg_bit;
3355 u32 cnt = 0;
3356
3357 switch (hdev->reset_type) {
3358 case HNAE3_IMP_RESET:
3359 reg = HCLGE_GLOBAL_RESET_REG;
3360 reg_bit = HCLGE_IMP_RESET_BIT;
3361 break;
3362 case HNAE3_GLOBAL_RESET:
3363 reg = HCLGE_GLOBAL_RESET_REG;
3364 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3365 break;
3366 case HNAE3_FUNC_RESET:
3367 reg = HCLGE_FUN_RST_ING;
3368 reg_bit = HCLGE_FUN_RST_ING_B;
3369 break;
3370 default:
3371 dev_err(&hdev->pdev->dev,
3372 "Wait for unsupported reset type: %d\n",
3373 hdev->reset_type);
3374 return -EINVAL;
3375 }
3376
3377 val = hclge_read_dev(&hdev->hw, reg);
3378 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3379 msleep(HCLGE_RESET_WAIT_MS);
3380 val = hclge_read_dev(&hdev->hw, reg);
3381 cnt++;
3382 }
3383
3384 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3385 dev_warn(&hdev->pdev->dev,
3386 "Wait for reset timeout: %d\n", hdev->reset_type);
3387 return -EBUSY;
3388 }
3389
3390 return 0;
3391 }
3392
3393 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3394 {
3395 struct hclge_vf_rst_cmd *req;
3396 struct hclge_desc desc;
3397
3398 req = (struct hclge_vf_rst_cmd *)desc.data;
3399 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3400 req->dest_vfid = func_id;
3401
3402 if (reset)
3403 req->vf_rst = 0x1;
3404
3405 return hclge_cmd_send(&hdev->hw, &desc, 1);
3406 }
3407
3408 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3409 {
3410 int i;
3411
3412 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3413 struct hclge_vport *vport = &hdev->vport[i];
3414 int ret;
3415
3416 /* Send cmd to set/clear VF's FUNC_RST_ING */
3417 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3418 if (ret) {
3419 dev_err(&hdev->pdev->dev,
3420 "set vf(%u) rst failed %d!\n",
3421 vport->vport_id, ret);
3422 return ret;
3423 }
3424
3425 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3426 continue;
3427
3428 /* Inform VF to process the reset.
3429 * hclge_inform_reset_assert_to_vf may fail if VF
3430 * driver is not loaded.
3431 */
3432 ret = hclge_inform_reset_assert_to_vf(vport);
3433 if (ret)
3434 dev_warn(&hdev->pdev->dev,
3435 "inform reset to vf(%u) failed %d!\n",
3436 vport->vport_id, ret);
3437 }
3438
3439 return 0;
3440 }
3441
3442 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3443 {
3444 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3445 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3446 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3447 return;
3448
3449 hclge_mbx_handler(hdev);
3450
3451 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3452 }
3453
3454 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3455 {
3456 struct hclge_pf_rst_sync_cmd *req;
3457 struct hclge_desc desc;
3458 int cnt = 0;
3459 int ret;
3460
3461 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3462 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3463
3464 do {
3465 /* VF needs to bring its netdev down via mbx during PF or FLR reset */
3466 hclge_mailbox_service_task(hdev);
3467
3468 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3469 /* for compatibility with old firmware, wait
3470 * 100 ms for the VF to stop IO
3471 */
3472 if (ret == -EOPNOTSUPP) {
3473 msleep(HCLGE_RESET_SYNC_TIME);
3474 return;
3475 } else if (ret) {
3476 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3477 ret);
3478 return;
3479 } else if (req->all_vf_ready) {
3480 return;
3481 }
3482 msleep(HCLGE_PF_RESET_SYNC_TIME);
3483 hclge_cmd_reuse_desc(&desc, true);
3484 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3485
3486 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3487 }
3488
3489 void hclge_report_hw_error(struct hclge_dev *hdev,
3490 enum hnae3_hw_error_type type)
3491 {
3492 struct hnae3_client *client = hdev->nic_client;
3493 u16 i;
3494
3495 if (!client || !client->ops->process_hw_error ||
3496 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3497 return;
3498
3499 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3500 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3501 }
3502
3503 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3504 {
3505 u32 reg_val;
3506
3507 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3508 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3509 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3510 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3511 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3512 }
3513
3514 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3515 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3516 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3517 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3518 }
3519 }
3520
3521 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3522 {
3523 struct hclge_desc desc;
3524 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3525 int ret;
3526
3527 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3528 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3529 req->fun_reset_vfid = func_id;
3530
3531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3532 if (ret)
3533 dev_err(&hdev->pdev->dev,
3534 "send function reset cmd fail, status =%d\n", ret);
3535
3536 return ret;
3537 }
3538
3539 static void hclge_do_reset(struct hclge_dev *hdev)
3540 {
3541 struct hnae3_handle *handle = &hdev->vport[0].nic;
3542 struct pci_dev *pdev = hdev->pdev;
3543 u32 val;
3544
3545 if (hclge_get_hw_reset_stat(handle)) {
3546 dev_info(&pdev->dev, "hardware reset not finish\n");
3547 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3548 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3549 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3550 return;
3551 }
3552
3553 switch (hdev->reset_type) {
3554 case HNAE3_GLOBAL_RESET:
3555 dev_info(&pdev->dev, "global reset requested\n");
3556 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3557 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3558 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3559 break;
3560 case HNAE3_FUNC_RESET:
3561 dev_info(&pdev->dev, "PF reset requested\n");
3562 /* schedule again to check later */
3563 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3564 hclge_reset_task_schedule(hdev);
3565 break;
3566 default:
3567 dev_warn(&pdev->dev,
3568 "unsupported reset type: %d\n", hdev->reset_type);
3569 break;
3570 }
3571 }
3572
3573 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3574 unsigned long *addr)
3575 {
3576 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3577 struct hclge_dev *hdev = ae_dev->priv;
3578
3579 /* first, resolve any unknown reset type to the known type(s) */
3580 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3581 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3582 HCLGE_MISC_VECTOR_INT_STS);
3583 /* we will intentionally ignore any errors from this function
3584 * as we will end up in *some* reset request in any case
3585 */
3586 if (hclge_handle_hw_msix_error(hdev, addr))
3587 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3588 msix_sts_reg);
3589
3590 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3591 /* We deferred the clearing of the error event which caused the
3592 * interrupt since it was not possible to do that in
3593 * interrupt context (and this is the reason we introduced the
3594 * new UNKNOWN reset type). Now that the errors have been
3595 * handled and cleared in hardware, we can safely enable
3596 * interrupts. This is an exception to the norm.
3597 */
3598 hclge_enable_vector(&hdev->misc_vector, true);
3599 }
3600
3601 /* return the highest priority reset level amongst all */
3602 if (test_bit(HNAE3_IMP_RESET, addr)) {
3603 rst_level = HNAE3_IMP_RESET;
3604 clear_bit(HNAE3_IMP_RESET, addr);
3605 clear_bit(HNAE3_GLOBAL_RESET, addr);
3606 clear_bit(HNAE3_FUNC_RESET, addr);
3607 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3608 rst_level = HNAE3_GLOBAL_RESET;
3609 clear_bit(HNAE3_GLOBAL_RESET, addr);
3610 clear_bit(HNAE3_FUNC_RESET, addr);
3611 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3612 rst_level = HNAE3_FUNC_RESET;
3613 clear_bit(HNAE3_FUNC_RESET, addr);
3614 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3615 rst_level = HNAE3_FLR_RESET;
3616 clear_bit(HNAE3_FLR_RESET, addr);
3617 }
3618
3619 if (hdev->reset_type != HNAE3_NONE_RESET &&
3620 rst_level < hdev->reset_type)
3621 return HNAE3_NONE_RESET;
3622
3623 return rst_level;
3624 }
3625
3626 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3627 {
3628 u32 clearval = 0;
3629
3630 switch (hdev->reset_type) {
3631 case HNAE3_IMP_RESET:
3632 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3633 break;
3634 case HNAE3_GLOBAL_RESET:
3635 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3636 break;
3637 default:
3638 break;
3639 }
3640
3641 if (!clearval)
3642 return;
3643
3644 /* For revision 0x20, the reset interrupt source
3645 * can only be cleared after the hardware reset is done
3646 */
3647 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3648 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3649 clearval);
3650
3651 hclge_enable_vector(&hdev->misc_vector, true);
3652 }
3653
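/* Set or clear the software reset-ready bit in the command queue depth
 * register. This is the handshake that tells the hardware whether the
 * driver has finished its reset preparation.
 */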
3654 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3655 {
3656 u32 reg_val;
3657
3658 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3659 if (enable)
3660 reg_val |= HCLGE_NIC_SW_RST_RDY;
3661 else
3662 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3663
3664 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3665 }
3666
3667 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3668 {
3669 int ret;
3670
3671 ret = hclge_set_all_vf_rst(hdev, true);
3672 if (ret)
3673 return ret;
3674
3675 hclge_func_reset_sync_vf(hdev);
3676
3677 return 0;
3678 }
3679
3680 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3681 {
3682 u32 reg_val;
3683 int ret = 0;
3684
3685 switch (hdev->reset_type) {
3686 case HNAE3_FUNC_RESET:
3687 ret = hclge_func_reset_notify_vf(hdev);
3688 if (ret)
3689 return ret;
3690
3691 ret = hclge_func_reset_cmd(hdev, 0);
3692 if (ret) {
3693 dev_err(&hdev->pdev->dev,
3694 "asserting function reset fail %d!\n", ret);
3695 return ret;
3696 }
3697
3698 /* After performing a PF reset, it is not necessary to do the
3699 * mailbox handling or send any command to firmware, because
3700 * any mailbox handling or command to firmware is only valid
3701 * after hclge_cmd_init is called.
3702 */
3703 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3704 hdev->rst_stats.pf_rst_cnt++;
3705 break;
3706 case HNAE3_FLR_RESET:
3707 ret = hclge_func_reset_notify_vf(hdev);
3708 if (ret)
3709 return ret;
3710 break;
3711 case HNAE3_IMP_RESET:
3712 hclge_handle_imp_error(hdev);
3713 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3714 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3715 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3716 break;
3717 default:
3718 break;
3719 }
3720
3721 /* inform hardware that preparatory work is done */
3722 msleep(HCLGE_RESET_SYNC_TIME);
3723 hclge_reset_handshake(hdev, true);
3724 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3725
3726 return ret;
3727 }
3728
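/* Handle a failed reset attempt: keep retrying while a reset is still
 * pending or the fail count is below MAX_RESET_FAIL_CNT; if a new reset
 * interrupt has arrived, just clear the cause and let it take over;
 * otherwise give up and mark HCLGE_STATE_RST_FAIL. Returns true if the
 * reset task should be scheduled again.
 */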
3729 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3730 {
3731 #define MAX_RESET_FAIL_CNT 5
3732
3733 if (hdev->reset_pending) {
3734 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3735 hdev->reset_pending);
3736 return true;
3737 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3738 HCLGE_RESET_INT_M) {
3739 dev_info(&hdev->pdev->dev,
3740 "reset failed because new reset interrupt\n");
3741 hclge_clear_reset_cause(hdev);
3742 return false;
3743 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3744 hdev->rst_stats.reset_fail_cnt++;
3745 set_bit(hdev->reset_type, &hdev->reset_pending);
3746 dev_info(&hdev->pdev->dev,
3747 "re-schedule reset task(%u)\n",
3748 hdev->rst_stats.reset_fail_cnt);
3749 return true;
3750 }
3751
3752 hclge_clear_reset_cause(hdev);
3753
3754 /* recover the handshake status when the reset fails */
3755 hclge_reset_handshake(hdev, true);
3756
3757 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3758
3759 hclge_dbg_dump_rst_info(hdev);
3760
3761 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3762
3763 return false;
3764 }
3765
3766 static int hclge_set_rst_done(struct hclge_dev *hdev)
3767 {
3768 struct hclge_pf_rst_done_cmd *req;
3769 struct hclge_desc desc;
3770 int ret;
3771
3772 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3773 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3774 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3775
3776 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3777 /* To be compatible with the old firmware, which does not support
3778 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3779 * return success
3780 */
3781 if (ret == -EOPNOTSUPP) {
3782 dev_warn(&hdev->pdev->dev,
3783 "current firmware does not support command(0x%x)!\n",
3784 HCLGE_OPC_PF_RST_DONE);
3785 return 0;
3786 } else if (ret) {
3787 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3788 ret);
3789 }
3790
3791 return ret;
3792 }
3793
3794 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3795 {
3796 int ret = 0;
3797
3798 switch (hdev->reset_type) {
3799 case HNAE3_FUNC_RESET:
3800 case HNAE3_FLR_RESET:
3801 ret = hclge_set_all_vf_rst(hdev, false);
3802 break;
3803 case HNAE3_GLOBAL_RESET:
3804 case HNAE3_IMP_RESET:
3805 ret = hclge_set_rst_done(hdev);
3806 break;
3807 default:
3808 break;
3809 }
3810
3811 /* clear the handshake status after re-initialization is done */
3812 hclge_reset_handshake(hdev, false);
3813
3814 return ret;
3815 }
3816
3817 static int hclge_reset_stack(struct hclge_dev *hdev)
3818 {
3819 int ret;
3820
3821 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3822 if (ret)
3823 return ret;
3824
3825 ret = hclge_reset_ae_dev(hdev->ae_dev);
3826 if (ret)
3827 return ret;
3828
3829 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3830 }
3831
3832 static int hclge_reset_prepare(struct hclge_dev *hdev)
3833 {
3834 int ret;
3835
3836 hdev->rst_stats.reset_cnt++;
3837 /* perform reset of the stack & ae device for a client */
3838 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3839 if (ret)
3840 return ret;
3841
3842 rtnl_lock();
3843 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3844 rtnl_unlock();
3845 if (ret)
3846 return ret;
3847
3848 return hclge_reset_prepare_wait(hdev);
3849 }
3850
3851 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3852 {
3853 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3854 enum hnae3_reset_type reset_level;
3855 int ret;
3856
3857 hdev->rst_stats.hw_reset_done_cnt++;
3858
3859 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3860 if (ret)
3861 return ret;
3862
3863 rtnl_lock();
3864 ret = hclge_reset_stack(hdev);
3865 rtnl_unlock();
3866 if (ret)
3867 return ret;
3868
3869 hclge_clear_reset_cause(hdev);
3870
3871 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3872 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3873 * times
3874 */
3875 if (ret &&
3876 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3877 return ret;
3878
3879 ret = hclge_reset_prepare_up(hdev);
3880 if (ret)
3881 return ret;
3882
3883 rtnl_lock();
3884 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3885 rtnl_unlock();
3886 if (ret)
3887 return ret;
3888
3889 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3890 if (ret)
3891 return ret;
3892
3893 hdev->last_reset_time = jiffies;
3894 hdev->rst_stats.reset_fail_cnt = 0;
3895 hdev->rst_stats.reset_done_cnt++;
3896 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3897
3898 /* if default_reset_request has a higher level reset request,
3899 * it should be handled as soon as possible, since some errors
3900 * need this kind of reset to be fixed.
3901 */
3902 reset_level = hclge_get_reset_level(ae_dev,
3903 &hdev->default_reset_request);
3904 if (reset_level != HNAE3_NONE_RESET)
3905 set_bit(reset_level, &hdev->reset_request);
3906
3907 return 0;
3908 }
3909
3910 static void hclge_reset(struct hclge_dev *hdev)
3911 {
3912 if (hclge_reset_prepare(hdev))
3913 goto err_reset;
3914
3915 if (hclge_reset_wait(hdev))
3916 goto err_reset;
3917
3918 if (hclge_reset_rebuild(hdev))
3919 goto err_reset;
3920
3921 return;
3922
3923 err_reset:
3924 if (hclge_reset_err_handle(hdev))
3925 hclge_reset_task_schedule(hdev);
3926 }
3927
3928 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3929 {
3930 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3931 struct hclge_dev *hdev = ae_dev->priv;
3932
3933 /* We might end up getting called broadly because of the 2 cases below:
3934 * 1. A recoverable error was conveyed through APEI and the only way to
3935 * bring normalcy is to reset.
3936 * 2. A new reset request from the stack due to timeout
3937 *
3938 * For the first case, the error event might not have an ae handle
3939 * available. Check if this is a new reset request and we are not here
3940 * just because the last reset attempt did not succeed and the watchdog
3941 * hit us again. We will know this if the last reset request did not
3942 * occur very recently (watchdog timer = 5 * HZ, let us check after a
3943 * sufficiently large time, say 4 * 5 * HZ). In case of a new request we
3944 * reset the "reset level" to PF reset. And if it is a repeat reset
3945 * request of the most recent one then we want to make sure we throttle
3946 * the reset request. Therefore, we will not allow it again within 3 * HZ.
3947 */
3948 if (!handle)
3949 handle = &hdev->vport[0].nic;
3950
3951 if (time_before(jiffies, (hdev->last_reset_time +
3952 HCLGE_RESET_INTERVAL))) {
3953 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3954 return;
3955 } else if (hdev->default_reset_request) {
3956 hdev->reset_level =
3957 hclge_get_reset_level(ae_dev,
3958 &hdev->default_reset_request);
3959 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3960 hdev->reset_level = HNAE3_FUNC_RESET;
3961 }
3962
3963 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3964 hdev->reset_level);
3965
3966 /* request reset & schedule reset task */
3967 set_bit(hdev->reset_level, &hdev->reset_request);
3968 hclge_reset_task_schedule(hdev);
3969
3970 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3971 hdev->reset_level++;
3972 }
3973
3974 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3975 enum hnae3_reset_type rst_type)
3976 {
3977 struct hclge_dev *hdev = ae_dev->priv;
3978
3979 set_bit(rst_type, &hdev->default_reset_request);
3980 }
3981
3982 static void hclge_reset_timer(struct timer_list *t)
3983 {
3984 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3985
3986 /* if default_reset_request has no value, it means that this reset
3987 * request has already been handled, so just return here
3988 */
3989 if (!hdev->default_reset_request)
3990 return;
3991
3992 dev_info(&hdev->pdev->dev,
3993 "triggering reset in reset timer\n");
3994 hclge_reset_event(hdev->pdev, NULL);
3995 }
3996
3997 static void hclge_reset_subtask(struct hclge_dev *hdev)
3998 {
3999 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4000
4001 /* check if there is any ongoing reset in the hardware. This status can
4002 * be checked from reset_pending. If there is, then we need to wait for
4003 * the hardware to complete the reset.
4004 * a. If we are able to figure out in reasonable time that the hardware
4005 * has fully reset, then we can proceed with the driver and client
4006 * reset.
4007 * b. else, we can come back later to check this status, so re-schedule
4008 * now.
4009 */
4010 hdev->last_reset_time = jiffies;
4011 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4012 if (hdev->reset_type != HNAE3_NONE_RESET)
4013 hclge_reset(hdev);
4014
4015 /* check if we got any *new* reset requests to be honored */
4016 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4017 if (hdev->reset_type != HNAE3_NONE_RESET)
4018 hclge_do_reset(hdev);
4019
4020 hdev->reset_type = HNAE3_NONE_RESET;
4021 }
4022
4023 static void hclge_reset_service_task(struct hclge_dev *hdev)
4024 {
4025 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4026 return;
4027
4028 down(&hdev->reset_sem);
4029 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4030
4031 hclge_reset_subtask(hdev);
4032
4033 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4034 up(&hdev->reset_sem);
4035 }
4036
4037 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4038 {
4039 int i;
4040
4041 /* start from vport 1 since the PF is always alive */
4042 for (i = 1; i < hdev->num_alloc_vport; i++) {
4043 struct hclge_vport *vport = &hdev->vport[i];
4044
4045 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4046 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4047
4048 /* If the VF is not alive, set its MPS to the default value */
4049 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4050 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4051 }
4052 }
4053
4054 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4055 {
4056 unsigned long delta = round_jiffies_relative(HZ);
4057
4058 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4059 return;
4060
4061 /* Always handle the link updating to make sure link state is
4062 * updated when it is triggered by mbx.
4063 */
4064 hclge_update_link_status(hdev);
4065 hclge_sync_mac_table(hdev);
4066 hclge_sync_promisc_mode(hdev);
4067
4068 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4069 delta = jiffies - hdev->last_serv_processed;
4070
4071 if (delta < round_jiffies_relative(HZ)) {
4072 delta = round_jiffies_relative(HZ) - delta;
4073 goto out;
4074 }
4075 }
4076
4077 hdev->serv_processed_cnt++;
4078 hclge_update_vport_alive(hdev);
4079
4080 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4081 hdev->last_serv_processed = jiffies;
4082 goto out;
4083 }
4084
4085 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4086 hclge_update_stats_for_all(hdev);
4087
4088 hclge_update_port_info(hdev);
4089 hclge_sync_vlan_filter(hdev);
4090
4091 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4092 hclge_rfs_filter_expire(hdev);
4093
4094 hdev->last_serv_processed = jiffies;
4095
4096 out:
4097 hclge_task_schedule(hdev, delta);
4098 }
4099
4100 static void hclge_service_task(struct work_struct *work)
4101 {
4102 struct hclge_dev *hdev =
4103 container_of(work, struct hclge_dev, service_task.work);
4104
4105 hclge_reset_service_task(hdev);
4106 hclge_mailbox_service_task(hdev);
4107 hclge_periodic_service_task(hdev);
4108
4109 /* Handle reset and mbx again in case the periodic task delays the
4110 * handling by calling hclge_task_schedule() in
4111 * hclge_periodic_service_task().
4112 */
4113 hclge_reset_service_task(hdev);
4114 hclge_mailbox_service_task(hdev);
4115 }
4116
4117 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4118 {
4119 /* VF handle has no client */
4120 if (!handle->client)
4121 return container_of(handle, struct hclge_vport, nic);
4122 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4123 return container_of(handle, struct hclge_vport, roce);
4124 else
4125 return container_of(handle, struct hclge_vport, nic);
4126 }
4127
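/* Allocate up to vector_num unused MSI-X vectors for this vport, skipping
 * vector 0 which is reserved for the misc interrupt, and record the irq
 * number and interrupt register address for each one. Returns the number
 * of vectors actually allocated.
 */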
4128 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4129 struct hnae3_vector_info *vector_info)
4130 {
4131 struct hclge_vport *vport = hclge_get_vport(handle);
4132 struct hnae3_vector_info *vector = vector_info;
4133 struct hclge_dev *hdev = vport->back;
4134 int alloc = 0;
4135 int i, j;
4136
4137 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4138 vector_num = min(hdev->num_msi_left, vector_num);
4139
4140 for (j = 0; j < vector_num; j++) {
4141 for (i = 1; i < hdev->num_msi; i++) {
4142 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4143 vector->vector = pci_irq_vector(hdev->pdev, i);
4144 vector->io_addr = hdev->hw.io_base +
4145 HCLGE_VECTOR_REG_BASE +
4146 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4147 vport->vport_id *
4148 HCLGE_VECTOR_VF_OFFSET;
4149 hdev->vector_status[i] = vport->vport_id;
4150 hdev->vector_irq[i] = vector->vector;
4151
4152 vector++;
4153 alloc++;
4154
4155 break;
4156 }
4157 }
4158 }
4159 hdev->num_msi_left -= alloc;
4160 hdev->num_msi_used += alloc;
4161
4162 return alloc;
4163 }
4164
4165 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4166 {
4167 int i;
4168
4169 for (i = 0; i < hdev->num_msi; i++)
4170 if (vector == hdev->vector_irq[i])
4171 return i;
4172
4173 return -EINVAL;
4174 }
4175
4176 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4177 {
4178 struct hclge_vport *vport = hclge_get_vport(handle);
4179 struct hclge_dev *hdev = vport->back;
4180 int vector_id;
4181
4182 vector_id = hclge_get_vector_index(hdev, vector);
4183 if (vector_id < 0) {
4184 dev_err(&hdev->pdev->dev,
4185 "Get vector index fail. vector = %d\n", vector);
4186 return vector_id;
4187 }
4188
4189 hclge_free_vector(hdev, vector_id);
4190
4191 return 0;
4192 }
4193
4194 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4195 {
4196 return HCLGE_RSS_KEY_SIZE;
4197 }
4198
4199 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4200 {
4201 return HCLGE_RSS_IND_TBL_SIZE;
4202 }
4203
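/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk, with the chunk index carried in the hash_config field.
 */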
4204 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4205 const u8 hfunc, const u8 *key)
4206 {
4207 struct hclge_rss_config_cmd *req;
4208 unsigned int key_offset = 0;
4209 struct hclge_desc desc;
4210 int key_counts;
4211 int key_size;
4212 int ret;
4213
4214 key_counts = HCLGE_RSS_KEY_SIZE;
4215 req = (struct hclge_rss_config_cmd *)desc.data;
4216
4217 while (key_counts) {
4218 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4219 false);
4220
4221 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4222 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4223
4224 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4225 memcpy(req->hash_key,
4226 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4227
4228 key_counts -= key_size;
4229 key_offset++;
4230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4231 if (ret) {
4232 dev_err(&hdev->pdev->dev,
4233 "Configure RSS config fail, status = %d\n",
4234 ret);
4235 return ret;
4236 }
4237 }
4238 return 0;
4239 }
4240
4241 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4242 {
4243 struct hclge_rss_indirection_table_cmd *req;
4244 struct hclge_desc desc;
4245 int i, j;
4246 int ret;
4247
4248 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4249
4250 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4251 hclge_cmd_setup_basic_desc
4252 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4253
4254 req->start_table_index =
4255 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4256 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4257
4258 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4259 req->rss_result[j] =
4260 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4261
4262 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4263 if (ret) {
4264 dev_err(&hdev->pdev->dev,
4265 "Configure rss indir table fail,status = %d\n",
4266 ret);
4267 return ret;
4268 }
4269 }
4270 return 0;
4271 }
4272
4273 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4274 u16 *tc_size, u16 *tc_offset)
4275 {
4276 struct hclge_rss_tc_mode_cmd *req;
4277 struct hclge_desc desc;
4278 int ret;
4279 int i;
4280
4281 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4282 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4283
4284 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4285 u16 mode = 0;
4286
4287 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4288 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4289 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4290 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4291 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4292
4293 req->rss_tc_mode[i] = cpu_to_le16(mode);
4294 }
4295
4296 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4297 if (ret)
4298 dev_err(&hdev->pdev->dev,
4299 "Configure rss tc mode fail, status = %d\n", ret);
4300
4301 return ret;
4302 }
4303
4304 static void hclge_get_rss_type(struct hclge_vport *vport)
4305 {
4306 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4307 vport->rss_tuple_sets.ipv4_udp_en ||
4308 vport->rss_tuple_sets.ipv4_sctp_en ||
4309 vport->rss_tuple_sets.ipv6_tcp_en ||
4310 vport->rss_tuple_sets.ipv6_udp_en ||
4311 vport->rss_tuple_sets.ipv6_sctp_en)
4312 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4313 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4314 vport->rss_tuple_sets.ipv6_fragment_en)
4315 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4316 else
4317 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4318 }
4319
4320 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4321 {
4322 struct hclge_rss_input_tuple_cmd *req;
4323 struct hclge_desc desc;
4324 int ret;
4325
4326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4327
4328 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4329
4330 /* Get the tuple cfg from pf */
4331 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4332 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4333 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4334 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4335 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4336 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4337 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4338 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4339 hclge_get_rss_type(&hdev->vport[0]);
4340 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4341 if (ret)
4342 dev_err(&hdev->pdev->dev,
4343 "Configure rss input fail, status = %d\n", ret);
4344 return ret;
4345 }
4346
4347 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4348 u8 *key, u8 *hfunc)
4349 {
4350 struct hclge_vport *vport = hclge_get_vport(handle);
4351 int i;
4352
4353 /* Get hash algorithm */
4354 if (hfunc) {
4355 switch (vport->rss_algo) {
4356 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4357 *hfunc = ETH_RSS_HASH_TOP;
4358 break;
4359 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4360 *hfunc = ETH_RSS_HASH_XOR;
4361 break;
4362 default:
4363 *hfunc = ETH_RSS_HASH_UNKNOWN;
4364 break;
4365 }
4366 }
4367
4368 /* Get the RSS Key required by the user */
4369 if (key)
4370 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4371
4372 /* Get indirect table */
4373 if (indir)
4374 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4375 indir[i] = vport->rss_indirection_tbl[i];
4376
4377 return 0;
4378 }
4379
4380 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4381 u8 *hash_algo)
4382 {
4383 switch (hfunc) {
4384 case ETH_RSS_HASH_TOP:
4385 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4386 return 0;
4387 case ETH_RSS_HASH_XOR:
4388 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4389 return 0;
4390 case ETH_RSS_HASH_NO_CHANGE:
4391 *hash_algo = vport->rss_algo;
4392 return 0;
4393 default:
4394 return -EINVAL;
4395 }
4396 }
4397
4398 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4399 const u8 *key, const u8 hfunc)
4400 {
4401 struct hclge_vport *vport = hclge_get_vport(handle);
4402 struct hclge_dev *hdev = vport->back;
4403 u8 hash_algo;
4404 int ret, i;
4405
4406 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4407 if (ret) {
4408 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4409 return ret;
4410 }
4411
4412 /* Set the RSS hash key if specified by the user */
4413 if (key) {
4414 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4415 if (ret)
4416 return ret;
4417
4418 /* Update the shadow RSS key with the user-specified key */
4419 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4420 } else {
4421 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4422 vport->rss_hash_key);
4423 if (ret)
4424 return ret;
4425 }
4426 vport->rss_algo = hash_algo;
4427
4428 /* Update the shadow RSS table with user specified qids */
4429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4430 vport->rss_indirection_tbl[i] = indir[i];
4431
4432 /* Update the hardware */
4433 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4434 }
4435
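/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * bits for source/destination IP and L4 port; SCTP flows additionally set
 * HCLGE_V_TAG_BIT.
 */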
4436 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4437 {
4438 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4439
4440 if (nfc->data & RXH_L4_B_2_3)
4441 hash_sets |= HCLGE_D_PORT_BIT;
4442 else
4443 hash_sets &= ~HCLGE_D_PORT_BIT;
4444
4445 if (nfc->data & RXH_IP_SRC)
4446 hash_sets |= HCLGE_S_IP_BIT;
4447 else
4448 hash_sets &= ~HCLGE_S_IP_BIT;
4449
4450 if (nfc->data & RXH_IP_DST)
4451 hash_sets |= HCLGE_D_IP_BIT;
4452 else
4453 hash_sets &= ~HCLGE_D_IP_BIT;
4454
4455 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4456 hash_sets |= HCLGE_V_TAG_BIT;
4457
4458 return hash_sets;
4459 }
4460
4461 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4462 struct ethtool_rxnfc *nfc)
4463 {
4464 struct hclge_vport *vport = hclge_get_vport(handle);
4465 struct hclge_dev *hdev = vport->back;
4466 struct hclge_rss_input_tuple_cmd *req;
4467 struct hclge_desc desc;
4468 u8 tuple_sets;
4469 int ret;
4470
4471 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4472 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4473 return -EINVAL;
4474
4475 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4476 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4477
4478 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4479 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4480 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4481 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4482 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4483 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4484 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4485 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4486
4487 tuple_sets = hclge_get_rss_hash_bits(nfc);
4488 switch (nfc->flow_type) {
4489 case TCP_V4_FLOW:
4490 req->ipv4_tcp_en = tuple_sets;
4491 break;
4492 case TCP_V6_FLOW:
4493 req->ipv6_tcp_en = tuple_sets;
4494 break;
4495 case UDP_V4_FLOW:
4496 req->ipv4_udp_en = tuple_sets;
4497 break;
4498 case UDP_V6_FLOW:
4499 req->ipv6_udp_en = tuple_sets;
4500 break;
4501 case SCTP_V4_FLOW:
4502 req->ipv4_sctp_en = tuple_sets;
4503 break;
4504 case SCTP_V6_FLOW:
4505 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4506 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4507 return -EINVAL;
4508
4509 req->ipv6_sctp_en = tuple_sets;
4510 break;
4511 case IPV4_FLOW:
4512 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4513 break;
4514 case IPV6_FLOW:
4515 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4516 break;
4517 default:
4518 return -EINVAL;
4519 }
4520
4521 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4522 if (ret) {
4523 dev_err(&hdev->pdev->dev,
4524 "Set rss tuple fail, status = %d\n", ret);
4525 return ret;
4526 }
4527
4528 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4529 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4530 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4531 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4532 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4533 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4534 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4535 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4536 hclge_get_rss_type(vport);
4537 return 0;
4538 }
4539
4540 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4541 struct ethtool_rxnfc *nfc)
4542 {
4543 struct hclge_vport *vport = hclge_get_vport(handle);
4544 u8 tuple_sets;
4545
4546 nfc->data = 0;
4547
4548 switch (nfc->flow_type) {
4549 case TCP_V4_FLOW:
4550 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4551 break;
4552 case UDP_V4_FLOW:
4553 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4554 break;
4555 case TCP_V6_FLOW:
4556 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4557 break;
4558 case UDP_V6_FLOW:
4559 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4560 break;
4561 case SCTP_V4_FLOW:
4562 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4563 break;
4564 case SCTP_V6_FLOW:
4565 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4566 break;
4567 case IPV4_FLOW:
4568 case IPV6_FLOW:
4569 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4570 break;
4571 default:
4572 return -EINVAL;
4573 }
4574
4575 if (!tuple_sets)
4576 return 0;
4577
4578 if (tuple_sets & HCLGE_D_PORT_BIT)
4579 nfc->data |= RXH_L4_B_2_3;
4580 if (tuple_sets & HCLGE_S_PORT_BIT)
4581 nfc->data |= RXH_L4_B_0_1;
4582 if (tuple_sets & HCLGE_D_IP_BIT)
4583 nfc->data |= RXH_IP_DST;
4584 if (tuple_sets & HCLGE_S_IP_BIT)
4585 nfc->data |= RXH_IP_SRC;
4586
4587 return 0;
4588 }
4589
4590 static int hclge_get_tc_size(struct hnae3_handle *handle)
4591 {
4592 struct hclge_vport *vport = hclge_get_vport(handle);
4593 struct hclge_dev *hdev = vport->back;
4594
4595 return hdev->rss_size_max;
4596 }
4597
4598 int hclge_rss_init_hw(struct hclge_dev *hdev)
4599 {
4600 struct hclge_vport *vport = hdev->vport;
4601 u8 *rss_indir = vport[0].rss_indirection_tbl;
4602 u16 rss_size = vport[0].alloc_rss_size;
4603 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4604 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4605 u8 *key = vport[0].rss_hash_key;
4606 u8 hfunc = vport[0].rss_algo;
4607 u16 tc_valid[HCLGE_MAX_TC_NUM];
4608 u16 roundup_size;
4609 unsigned int i;
4610 int ret;
4611
4612 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4613 if (ret)
4614 return ret;
4615
4616 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4617 if (ret)
4618 return ret;
4619
4620 ret = hclge_set_rss_input_tuple(hdev);
4621 if (ret)
4622 return ret;
4623
4624 /* Each TC has the same queue size, and the tc_size set to hardware is
4625 * the log2 of the roundup power of two of rss_size; the actual queue
4626 * size is limited by the indirection table.
4627 */
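/* For example, rss_size = 24 gives roundup_pow_of_two(24) = 32, so the
 * tc_size written to hardware is ilog2(32) = 5.
 */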
4628 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4629 dev_err(&hdev->pdev->dev,
4630 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4631 rss_size);
4632 return -EINVAL;
4633 }
4634
4635 roundup_size = roundup_pow_of_two(rss_size);
4636 roundup_size = ilog2(roundup_size);
4637
4638 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4639 tc_valid[i] = 0;
4640
4641 if (!(hdev->hw_tc_map & BIT(i)))
4642 continue;
4643
4644 tc_valid[i] = 1;
4645 tc_size[i] = roundup_size;
4646 tc_offset[i] = rss_size * i;
4647 }
4648
4649 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4650 }
4651
4652 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4653 {
4654 struct hclge_vport *vport = hdev->vport;
4655 int i, j;
4656
4657 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4658 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4659 vport[j].rss_indirection_tbl[i] =
4660 i % vport[j].alloc_rss_size;
4661 }
4662 }
4663
4664 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4665 {
4666 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4667 struct hclge_vport *vport = hdev->vport;
4668
4669 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4670 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4671
4672 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4673 vport[i].rss_tuple_sets.ipv4_tcp_en =
4674 HCLGE_RSS_INPUT_TUPLE_OTHER;
4675 vport[i].rss_tuple_sets.ipv4_udp_en =
4676 HCLGE_RSS_INPUT_TUPLE_OTHER;
4677 vport[i].rss_tuple_sets.ipv4_sctp_en =
4678 HCLGE_RSS_INPUT_TUPLE_SCTP;
4679 vport[i].rss_tuple_sets.ipv4_fragment_en =
4680 HCLGE_RSS_INPUT_TUPLE_OTHER;
4681 vport[i].rss_tuple_sets.ipv6_tcp_en =
4682 HCLGE_RSS_INPUT_TUPLE_OTHER;
4683 vport[i].rss_tuple_sets.ipv6_udp_en =
4684 HCLGE_RSS_INPUT_TUPLE_OTHER;
4685 vport[i].rss_tuple_sets.ipv6_sctp_en =
4686 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4687 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4688 HCLGE_RSS_INPUT_TUPLE_SCTP;
4689 vport[i].rss_tuple_sets.ipv6_fragment_en =
4690 HCLGE_RSS_INPUT_TUPLE_OTHER;
4691
4692 vport[i].rss_algo = rss_algo;
4693
4694 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4695 HCLGE_RSS_KEY_SIZE);
4696 }
4697
4698 hclge_rss_indir_init_cfg(hdev);
4699 }
4700
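/* Map (en == true) or unmap (en == false) the TQPs in ring_chain to the
 * given interrupt vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries
 * are packed into one command descriptor; the descriptor is sent and
 * re-initialized whenever it fills up, and any remainder is flushed at
 * the end.
 */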
4701 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4702 int vector_id, bool en,
4703 struct hnae3_ring_chain_node *ring_chain)
4704 {
4705 struct hclge_dev *hdev = vport->back;
4706 struct hnae3_ring_chain_node *node;
4707 struct hclge_desc desc;
4708 struct hclge_ctrl_vector_chain_cmd *req =
4709 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4710 enum hclge_cmd_status status;
4711 enum hclge_opcode_type op;
4712 u16 tqp_type_and_id;
4713 int i;
4714
4715 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4716 hclge_cmd_setup_basic_desc(&desc, op, false);
4717 req->int_vector_id = vector_id;
4718
4719 i = 0;
4720 for (node = ring_chain; node; node = node->next) {
4721 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4722 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4723 HCLGE_INT_TYPE_S,
4724 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4725 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4726 HCLGE_TQP_ID_S, node->tqp_index);
4727 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4728 HCLGE_INT_GL_IDX_S,
4729 hnae3_get_field(node->int_gl_idx,
4730 HNAE3_RING_GL_IDX_M,
4731 HNAE3_RING_GL_IDX_S));
4732 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4733 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4734 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4735 req->vfid = vport->vport_id;
4736
4737 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4738 if (status) {
4739 dev_err(&hdev->pdev->dev,
4740 "Map TQP fail, status is %d.\n",
4741 status);
4742 return -EIO;
4743 }
4744 i = 0;
4745
4746 hclge_cmd_setup_basic_desc(&desc,
4747 op,
4748 false);
4749 req->int_vector_id = vector_id;
4750 }
4751 }
4752
4753 if (i > 0) {
4754 req->int_cause_num = i;
4755 req->vfid = vport->vport_id;
4756 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4757 if (status) {
4758 dev_err(&hdev->pdev->dev,
4759 "Map TQP fail, status is %d.\n", status);
4760 return -EIO;
4761 }
4762 }
4763
4764 return 0;
4765 }
4766
4767 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4768 struct hnae3_ring_chain_node *ring_chain)
4769 {
4770 struct hclge_vport *vport = hclge_get_vport(handle);
4771 struct hclge_dev *hdev = vport->back;
4772 int vector_id;
4773
4774 vector_id = hclge_get_vector_index(hdev, vector);
4775 if (vector_id < 0) {
4776 dev_err(&hdev->pdev->dev,
4777 "failed to get vector index. vector=%d\n", vector);
4778 return vector_id;
4779 }
4780
4781 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4782 }
4783
4784 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4785 struct hnae3_ring_chain_node *ring_chain)
4786 {
4787 struct hclge_vport *vport = hclge_get_vport(handle);
4788 struct hclge_dev *hdev = vport->back;
4789 int vector_id, ret;
4790
4791 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4792 return 0;
4793
4794 vector_id = hclge_get_vector_index(hdev, vector);
4795 if (vector_id < 0) {
4796 dev_err(&handle->pdev->dev,
4797 "Get vector index fail. ret =%d\n", vector_id);
4798 return vector_id;
4799 }
4800
4801 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4802 if (ret)
4803 dev_err(&handle->pdev->dev,
4804 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4805 vector_id, ret);
4806
4807 return ret;
4808 }
4809
4810 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4811 struct hclge_promisc_param *param)
4812 {
4813 struct hclge_promisc_cfg_cmd *req;
4814 struct hclge_desc desc;
4815 int ret;
4816
4817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4818
4819 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4820 req->vf_id = param->vf_id;
4821
4822 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4823 * device revision 0x20; newer revisions support them. Setting
4824 * these two fields does not cause an error when the driver
4825 * sends the command to the firmware on revision 0x20.
4826 */
4827 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4828 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4829
4830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4831 if (ret)
4832 dev_err(&hdev->pdev->dev,
4833 "failed to set vport %d promisc mode, ret = %d.\n",
4834 param->vf_id, ret);
4835
4836 return ret;
4837 }
4838
4839 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4840 bool en_uc, bool en_mc, bool en_bc,
4841 int vport_id)
4842 {
4843 if (!param)
4844 return;
4845
4846 memset(param, 0, sizeof(struct hclge_promisc_param));
4847 if (en_uc)
4848 param->enable = HCLGE_PROMISC_EN_UC;
4849 if (en_mc)
4850 param->enable |= HCLGE_PROMISC_EN_MC;
4851 if (en_bc)
4852 param->enable |= HCLGE_PROMISC_EN_BC;
4853 param->vf_id = vport_id;
4854 }
4855
4856 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4857 bool en_mc_pmc, bool en_bc_pmc)
4858 {
4859 struct hclge_dev *hdev = vport->back;
4860 struct hclge_promisc_param param;
4861
4862 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4863 vport->vport_id);
4864 return hclge_cmd_set_promisc_mode(hdev, &param);
4865 }
4866
4867 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4868 bool en_mc_pmc)
4869 {
4870 struct hclge_vport *vport = hclge_get_vport(handle);
4871 struct hclge_dev *hdev = vport->back;
4872 bool en_bc_pmc = true;
4873
4874 /* For devices whose version is below V2, if broadcast promisc is
4875 * enabled, the vlan filter is always bypassed. So broadcast promisc
4876 * should be disabled until the user enables promisc mode
4877 */
4878 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4879 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4880
4881 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4882 en_bc_pmc);
4883 }
4884
4885 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4886 {
4887 struct hclge_vport *vport = hclge_get_vport(handle);
4888 struct hclge_dev *hdev = vport->back;
4889
4890 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4891 }
4892
4893 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4894 {
4895 struct hclge_get_fd_mode_cmd *req;
4896 struct hclge_desc desc;
4897 int ret;
4898
4899 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4900
4901 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4902
4903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904 if (ret) {
4905 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4906 return ret;
4907 }
4908
4909 *fd_mode = req->mode;
4910
4911 return ret;
4912 }
4913
4914 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4915 u32 *stage1_entry_num,
4916 u32 *stage2_entry_num,
4917 u16 *stage1_counter_num,
4918 u16 *stage2_counter_num)
4919 {
4920 struct hclge_get_fd_allocation_cmd *req;
4921 struct hclge_desc desc;
4922 int ret;
4923
4924 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4925
4926 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4927
4928 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4929 if (ret) {
4930 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4931 ret);
4932 return ret;
4933 }
4934
4935 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4936 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4937 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4938 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4939
4940 return ret;
4941 }
4942
4943 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4944 enum HCLGE_FD_STAGE stage_num)
4945 {
4946 struct hclge_set_fd_key_config_cmd *req;
4947 struct hclge_fd_key_cfg *stage;
4948 struct hclge_desc desc;
4949 int ret;
4950
4951 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4952
4953 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4954 stage = &hdev->fd_cfg.key_cfg[stage_num];
4955 req->stage = stage_num;
4956 req->key_select = stage->key_sel;
4957 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4958 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4959 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4960 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4961 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4962 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4963
4964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965 if (ret)
4966 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4967
4968 return ret;
4969 }
4970
4971 static int hclge_init_fd_config(struct hclge_dev *hdev)
4972 {
4973 #define LOW_2_WORDS 0x03
4974 struct hclge_fd_key_cfg *key_cfg;
4975 int ret;
4976
4977 if (!hnae3_dev_fd_supported(hdev))
4978 return 0;
4979
4980 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4981 if (ret)
4982 return ret;
4983
4984 switch (hdev->fd_cfg.fd_mode) {
4985 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4986 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4987 break;
4988 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4989 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4990 break;
4991 default:
4992 dev_err(&hdev->pdev->dev,
4993 "Unsupported flow director mode %u\n",
4994 hdev->fd_cfg.fd_mode);
4995 return -EOPNOTSUPP;
4996 }
4997
4998 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4999 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5000 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5001 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5002 key_cfg->outer_sipv6_word_en = 0;
5003 key_cfg->outer_dipv6_word_en = 0;
5004
5005 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5006 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5007 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5008 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5009
5010 /* If the max 400-bit key is used, we can also support MAC address tuples */
5011 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5012 key_cfg->tuple_active |=
5013 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5014
5015 /* roce_type is used to filter roce frames
5016 * dst_vport is used to specify the rule
5017 */
5018 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5019
5020 ret = hclge_get_fd_allocation(hdev,
5021 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5022 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5023 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5024 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5025 if (ret)
5026 return ret;
5027
5028 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5029 }
5030
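/* Write one flow director TCAM entry. The key does not fit into a single
 * command descriptor, so it is split across three descriptors chained
 * with HCLGE_CMD_FLAG_NEXT; sel_x selects whether the x or y half of the
 * key is written, and is_add controls the entry-valid bit.
 */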
5031 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5032 int loc, u8 *key, bool is_add)
5033 {
5034 struct hclge_fd_tcam_config_1_cmd *req1;
5035 struct hclge_fd_tcam_config_2_cmd *req2;
5036 struct hclge_fd_tcam_config_3_cmd *req3;
5037 struct hclge_desc desc[3];
5038 int ret;
5039
5040 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5041 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5042 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5043 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5044 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5045
5046 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5047 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5048 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5049
5050 req1->stage = stage;
5051 req1->xy_sel = sel_x ? 1 : 0;
5052 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5053 req1->index = cpu_to_le32(loc);
5054 req1->entry_vld = sel_x ? is_add : 0;
5055
5056 if (key) {
5057 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5058 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5059 sizeof(req2->tcam_data));
5060 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5061 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5062 }
5063
5064 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5065 if (ret)
5066 dev_err(&hdev->pdev->dev,
5067 "config tcam key fail, ret=%d\n",
5068 ret);
5069
5070 return ret;
5071 }
5072
5073 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5074 struct hclge_fd_ad_data *action)
5075 {
5076 struct hclge_fd_ad_config_cmd *req;
5077 struct hclge_desc desc;
5078 u64 ad_data = 0;
5079 int ret;
5080
5081 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5082
5083 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5084 req->index = cpu_to_le32(loc);
5085 req->stage = stage;
5086
5087 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5088 action->write_rule_id_to_bd);
5089 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5090 action->rule_id);
5091 ad_data <<= 32;
5092 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5093 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5094 action->forward_to_direct_queue);
5095 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5096 action->queue_id);
5097 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5098 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5099 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5100 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5101 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5102 action->counter_id);
5103
5104 req->ad_data = cpu_to_le64(ad_data);
5105 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5106 if (ret)
5107 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5108
5109 return ret;
5110 }
5111
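/* Convert one tuple of the rule into the TCAM x/y key format using
 * calc_x()/calc_y() on the tuple value and its mask. Returns true when the
 * tuple occupies space in the key (so the caller advances the key
 * pointers), including tuples the rule leaves unused, and false for tuple
 * bits that are not part of the key.
 */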
5112 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5113 struct hclge_fd_rule *rule)
5114 {
5115 u16 tmp_x_s, tmp_y_s;
5116 u32 tmp_x_l, tmp_y_l;
5117 int i;
5118
5119 if (rule->unused_tuple & tuple_bit)
5120 return true;
5121
5122 switch (tuple_bit) {
5123 case BIT(INNER_DST_MAC):
5124 for (i = 0; i < ETH_ALEN; i++) {
5125 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5126 rule->tuples_mask.dst_mac[i]);
5127 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5128 rule->tuples_mask.dst_mac[i]);
5129 }
5130
5131 return true;
5132 case BIT(INNER_SRC_MAC):
5133 for (i = 0; i < ETH_ALEN; i++) {
5134 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5135 rule->tuples_mask.src_mac[i]);
5136 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5137 rule->tuples_mask.src_mac[i]);
5138 }
5139
5140 return true;
5141 case BIT(INNER_VLAN_TAG_FST):
5142 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5143 rule->tuples_mask.vlan_tag1);
5144 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5145 rule->tuples_mask.vlan_tag1);
5146 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5147 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5148
5149 return true;
5150 case BIT(INNER_ETH_TYPE):
5151 calc_x(tmp_x_s, rule->tuples.ether_proto,
5152 rule->tuples_mask.ether_proto);
5153 calc_y(tmp_y_s, rule->tuples.ether_proto,
5154 rule->tuples_mask.ether_proto);
5155 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5156 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5157
5158 return true;
5159 case BIT(INNER_IP_TOS):
5160 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5161 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5162
5163 return true;
5164 case BIT(INNER_IP_PROTO):
5165 calc_x(*key_x, rule->tuples.ip_proto,
5166 rule->tuples_mask.ip_proto);
5167 calc_y(*key_y, rule->tuples.ip_proto,
5168 rule->tuples_mask.ip_proto);
5169
5170 return true;
5171 case BIT(INNER_SRC_IP):
5172 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5173 rule->tuples_mask.src_ip[IPV4_INDEX]);
5174 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5175 rule->tuples_mask.src_ip[IPV4_INDEX]);
5176 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5177 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5178
5179 return true;
5180 case BIT(INNER_DST_IP):
5181 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5182 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5183 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5184 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5185 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5186 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5187
5188 return true;
5189 case BIT(INNER_SRC_PORT):
5190 calc_x(tmp_x_s, rule->tuples.src_port,
5191 rule->tuples_mask.src_port);
5192 calc_y(tmp_y_s, rule->tuples.src_port,
5193 rule->tuples_mask.src_port);
5194 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5195 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5196
5197 return true;
5198 case BIT(INNER_DST_PORT):
5199 calc_x(tmp_x_s, rule->tuples.dst_port,
5200 rule->tuples_mask.dst_port);
5201 calc_y(tmp_y_s, rule->tuples.dst_port,
5202 rule->tuples_mask.dst_port);
5203 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5204 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5205
5206 return true;
5207 default:
5208 return false;
5209 }
5210 }
5211
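/* Build the port number used by the switch: for a host port it packs the
 * pf id, the vf id and the HOST_PORT type bit; for a network port it packs
 * the physical network port id and the NETWORK_PORT type bit.
 */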
5212 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5213 u8 vf_id, u8 network_port_id)
5214 {
5215 u32 port_number = 0;
5216
5217 if (port_type == HOST_PORT) {
5218 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5219 pf_id);
5220 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5221 vf_id);
5222 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5223 } else {
5224 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5225 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5226 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5227 }
5228
5229 return port_number;
5230 }
5231
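/* Pack the active meta data fields (packet type and destination vport in
 * the cases handled below) into a u32, convert it to a tcam key pair and
 * shift the result so that it is aligned to the most significant end of
 * the meta data region.
 */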
5232 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5233 __le32 *key_x, __le32 *key_y,
5234 struct hclge_fd_rule *rule)
5235 {
5236 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5237 u8 cur_pos = 0, tuple_size, shift_bits;
5238 unsigned int i;
5239
5240 for (i = 0; i < MAX_META_DATA; i++) {
5241 tuple_size = meta_data_key_info[i].key_length;
5242 tuple_bit = key_cfg->meta_data_active & BIT(i);
5243
5244 switch (tuple_bit) {
5245 case BIT(ROCE_TYPE):
5246 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5247 cur_pos += tuple_size;
5248 break;
5249 case BIT(DST_VPORT):
5250 port_number = hclge_get_port_number(HOST_PORT, 0,
5251 rule->vf_id, 0);
5252 hnae3_set_field(meta_data,
5253 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
5254 cur_pos, port_number);
5255 cur_pos += tuple_size;
5256 break;
5257 default:
5258 break;
5259 }
5260 }
5261
5262 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5263 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5264 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5265
5266 *key_x = cpu_to_le32(tmp_x << shift_bits);
5267 *key_y = cpu_to_le32(tmp_y << shift_bits);
5268 }
5269
5270 /* A complete key is combined with meta data key and tuple key.
5271 * Meta data key is stored at the MSB region, and tuple key is stored at
5272 * the LSB region, unused bits will be filled 0.
5273 */
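/* For illustration, assuming a 400-bit key and a 32-bit meta data region
 * (which is believed to match HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1):
 *   meta_data_region = 400 / 8 - 32 / 8 = 46
 * so key bytes 46..49 carry the meta data key, key bytes 0..45 are
 * available for the tuple key, and any unused bytes stay zero.
 */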
5274 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5275 struct hclge_fd_rule *rule)
5276 {
5277 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5278 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5279 u8 *cur_key_x, *cur_key_y;
5280 u8 meta_data_region;
5281 u8 tuple_size;
5282 int ret;
5283 u32 i;
5284
5285 memset(key_x, 0, sizeof(key_x));
5286 memset(key_y, 0, sizeof(key_y));
5287 cur_key_x = key_x;
5288 cur_key_y = key_y;
5289
5290 	for (i = 0; i < MAX_TUPLE; i++) {
5291 bool tuple_valid;
5292 u32 check_tuple;
5293
5294 tuple_size = tuple_key_info[i].key_length / 8;
5295 check_tuple = key_cfg->tuple_active & BIT(i);
5296
5297 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5298 cur_key_y, rule);
5299 if (tuple_valid) {
5300 cur_key_x += tuple_size;
5301 cur_key_y += tuple_size;
5302 }
5303 }
5304
5305 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5306 MAX_META_DATA_LENGTH / 8;
5307
5308 hclge_fd_convert_meta_data(key_cfg,
5309 (__le32 *)(key_x + meta_data_region),
5310 (__le32 *)(key_y + meta_data_region),
5311 rule);
5312
5313 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5314 true);
5315 if (ret) {
5316 dev_err(&hdev->pdev->dev,
5317 "fd key_y config fail, loc=%u, ret=%d\n",
5318 			rule->location, ret);
5319 return ret;
5320 }
5321
5322 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5323 true);
5324 if (ret)
5325 dev_err(&hdev->pdev->dev,
5326 "fd key_x config fail, loc=%u, ret=%d\n",
5327 			rule->location, ret);
5328 return ret;
5329 }
5330
5331 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5332 struct hclge_fd_rule *rule)
5333 {
5334 struct hclge_fd_ad_data ad_data;
5335
5336 ad_data.ad_id = rule->location;
5337
5338 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5339 ad_data.drop_packet = true;
5340 ad_data.forward_to_direct_queue = false;
5341 ad_data.queue_id = 0;
5342 } else {
5343 ad_data.drop_packet = false;
5344 ad_data.forward_to_direct_queue = true;
5345 ad_data.queue_id = rule->queue_id;
5346 }
5347
5348 ad_data.use_counter = false;
5349 ad_data.counter_id = 0;
5350
5351 ad_data.use_next_stage = false;
5352 ad_data.next_input_key = 0;
5353
5354 ad_data.write_rule_id_to_bd = true;
5355 ad_data.rule_id = rule->location;
5356
5357 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5358 }
5359
5360 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5361 u32 *unused_tuple)
5362 {
5363 if (!spec || !unused_tuple)
5364 return -EINVAL;
5365
5366 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5367
5368 if (!spec->ip4src)
5369 *unused_tuple |= BIT(INNER_SRC_IP);
5370
5371 if (!spec->ip4dst)
5372 *unused_tuple |= BIT(INNER_DST_IP);
5373
5374 if (!spec->psrc)
5375 *unused_tuple |= BIT(INNER_SRC_PORT);
5376
5377 if (!spec->pdst)
5378 *unused_tuple |= BIT(INNER_DST_PORT);
5379
5380 if (!spec->tos)
5381 *unused_tuple |= BIT(INNER_IP_TOS);
5382
5383 return 0;
5384 }
5385
5386 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5387 u32 *unused_tuple)
5388 {
5389 if (!spec || !unused_tuple)
5390 return -EINVAL;
5391
5392 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5393 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5394
5395 if (!spec->ip4src)
5396 *unused_tuple |= BIT(INNER_SRC_IP);
5397
5398 if (!spec->ip4dst)
5399 *unused_tuple |= BIT(INNER_DST_IP);
5400
5401 if (!spec->tos)
5402 *unused_tuple |= BIT(INNER_IP_TOS);
5403
5404 if (!spec->proto)
5405 *unused_tuple |= BIT(INNER_IP_PROTO);
5406
5407 if (spec->l4_4_bytes)
5408 return -EOPNOTSUPP;
5409
5410 if (spec->ip_ver != ETH_RX_NFC_IP4)
5411 return -EOPNOTSUPP;
5412
5413 return 0;
5414 }
5415
5416 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5417 u32 *unused_tuple)
5418 {
5419 if (!spec || !unused_tuple)
5420 return -EINVAL;
5421
5422 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5423 BIT(INNER_IP_TOS);
5424
5425 	/* check whether the src/dst ip addresses are used */
5426 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5427 !spec->ip6src[2] && !spec->ip6src[3])
5428 *unused_tuple |= BIT(INNER_SRC_IP);
5429
5430 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5431 !spec->ip6dst[2] && !spec->ip6dst[3])
5432 *unused_tuple |= BIT(INNER_DST_IP);
5433
5434 if (!spec->psrc)
5435 *unused_tuple |= BIT(INNER_SRC_PORT);
5436
5437 if (!spec->pdst)
5438 *unused_tuple |= BIT(INNER_DST_PORT);
5439
5440 if (spec->tclass)
5441 return -EOPNOTSUPP;
5442
5443 return 0;
5444 }
5445
5446 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5447 u32 *unused_tuple)
5448 {
5449 if (!spec || !unused_tuple)
5450 return -EINVAL;
5451
5452 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5453 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5454
5455 	/* check whether the src/dst ip addresses are used */
5456 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5457 !spec->ip6src[2] && !spec->ip6src[3])
5458 *unused_tuple |= BIT(INNER_SRC_IP);
5459
5460 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5461 !spec->ip6dst[2] && !spec->ip6dst[3])
5462 *unused_tuple |= BIT(INNER_DST_IP);
5463
5464 if (!spec->l4_proto)
5465 *unused_tuple |= BIT(INNER_IP_PROTO);
5466
5467 if (spec->tclass)
5468 return -EOPNOTSUPP;
5469
5470 if (spec->l4_4_bytes)
5471 return -EOPNOTSUPP;
5472
5473 return 0;
5474 }
5475
5476 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5477 {
5478 if (!spec || !unused_tuple)
5479 return -EINVAL;
5480
5481 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5482 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5483 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5484
5485 if (is_zero_ether_addr(spec->h_source))
5486 *unused_tuple |= BIT(INNER_SRC_MAC);
5487
5488 if (is_zero_ether_addr(spec->h_dest))
5489 *unused_tuple |= BIT(INNER_DST_MAC);
5490
5491 if (!spec->h_proto)
5492 *unused_tuple |= BIT(INNER_ETH_TYPE);
5493
5494 return 0;
5495 }
5496
5497 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5498 struct ethtool_rx_flow_spec *fs,
5499 u32 *unused_tuple)
5500 {
5501 if (fs->flow_type & FLOW_EXT) {
5502 if (fs->h_ext.vlan_etype) {
5503 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5504 return -EOPNOTSUPP;
5505 }
5506
5507 if (!fs->h_ext.vlan_tci)
5508 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5509
5510 if (fs->m_ext.vlan_tci &&
5511 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5512 dev_err(&hdev->pdev->dev,
5513 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5514 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5515 return -EINVAL;
5516 }
5517 } else {
5518 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5519 }
5520
5521 if (fs->flow_type & FLOW_MAC_EXT) {
5522 if (hdev->fd_cfg.fd_mode !=
5523 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5524 dev_err(&hdev->pdev->dev,
5525 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5526 return -EOPNOTSUPP;
5527 }
5528
5529 if (is_zero_ether_addr(fs->h_ext.h_dest))
5530 *unused_tuple |= BIT(INNER_DST_MAC);
5531 else
5532 *unused_tuple &= ~BIT(INNER_DST_MAC);
5533 }
5534
5535 return 0;
5536 }
5537
5538 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5539 struct ethtool_rx_flow_spec *fs,
5540 u32 *unused_tuple)
5541 {
5542 u32 flow_type;
5543 int ret;
5544
5545 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5546 dev_err(&hdev->pdev->dev,
5547 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5548 fs->location,
5549 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5550 return -EINVAL;
5551 }
5552
5553 if ((fs->flow_type & FLOW_EXT) &&
5554 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5555 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5556 return -EOPNOTSUPP;
5557 }
5558
5559 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5560 switch (flow_type) {
5561 case SCTP_V4_FLOW:
5562 case TCP_V4_FLOW:
5563 case UDP_V4_FLOW:
5564 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5565 unused_tuple);
5566 break;
5567 case IP_USER_FLOW:
5568 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5569 unused_tuple);
5570 break;
5571 case SCTP_V6_FLOW:
5572 case TCP_V6_FLOW:
5573 case UDP_V6_FLOW:
5574 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5575 unused_tuple);
5576 break;
5577 case IPV6_USER_FLOW:
5578 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5579 unused_tuple);
5580 break;
5581 case ETHER_FLOW:
5582 if (hdev->fd_cfg.fd_mode !=
5583 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5584 dev_err(&hdev->pdev->dev,
5585 "ETHER_FLOW is not supported in current fd mode!\n");
5586 return -EOPNOTSUPP;
5587 }
5588
5589 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5590 unused_tuple);
5591 break;
5592 default:
5593 dev_err(&hdev->pdev->dev,
5594 "unsupported protocol type, protocol type = %#x\n",
5595 flow_type);
5596 return -EOPNOTSUPP;
5597 }
5598
5599 if (ret) {
5600 dev_err(&hdev->pdev->dev,
5601 "failed to check flow union tuple, ret = %d\n",
5602 ret);
5603 return ret;
5604 }
5605
5606 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5607 }
5608
5609 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5610 {
5611 struct hclge_fd_rule *rule = NULL;
5612 struct hlist_node *node2;
5613
5614 spin_lock_bh(&hdev->fd_rule_lock);
5615 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5616 if (rule->location >= location)
5617 break;
5618 }
5619
5620 spin_unlock_bh(&hdev->fd_rule_lock);
5621
5622 return rule && rule->location == location;
5623 }
5624
5625 /* the caller must hold fd_rule_lock */
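/* The rule list is kept sorted by rule location. When adding, an existing
 * entry at the same location is freed and replaced by the new rule; when
 * deleting, the entry is removed and, if it was the last one, the active
 * rule type falls back to HCLGE_FD_RULE_NONE.
 */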
5626 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5627 struct hclge_fd_rule *new_rule,
5628 u16 location,
5629 bool is_add)
5630 {
5631 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5632 struct hlist_node *node2;
5633
5634 if (is_add && !new_rule)
5635 return -EINVAL;
5636
5637 hlist_for_each_entry_safe(rule, node2,
5638 &hdev->fd_rule_list, rule_node) {
5639 if (rule->location >= location)
5640 break;
5641 parent = rule;
5642 }
5643
5644 if (rule && rule->location == location) {
5645 hlist_del(&rule->rule_node);
5646 kfree(rule);
5647 hdev->hclge_fd_rule_num--;
5648
5649 if (!is_add) {
5650 if (!hdev->hclge_fd_rule_num)
5651 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5652 clear_bit(location, hdev->fd_bmap);
5653
5654 return 0;
5655 }
5656 } else if (!is_add) {
5657 dev_err(&hdev->pdev->dev,
5658 			"failed to delete, rule %u does not exist\n",
5659 location);
5660 return -EINVAL;
5661 }
5662
5663 INIT_HLIST_NODE(&new_rule->rule_node);
5664
5665 if (parent)
5666 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5667 else
5668 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5669
5670 set_bit(location, hdev->fd_bmap);
5671 hdev->hclge_fd_rule_num++;
5672 hdev->fd_active_type = new_rule->rule_type;
5673
5674 return 0;
5675 }
5676
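/* Translate an ethtool_rx_flow_spec into the driver's tuple/tuple_mask
 * representation, converting the big-endian fields of the spec to host
 * byte order along the way.
 */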
5677 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5678 struct ethtool_rx_flow_spec *fs,
5679 struct hclge_fd_rule *rule)
5680 {
5681 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5682
5683 switch (flow_type) {
5684 case SCTP_V4_FLOW:
5685 case TCP_V4_FLOW:
5686 case UDP_V4_FLOW:
5687 rule->tuples.src_ip[IPV4_INDEX] =
5688 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5689 rule->tuples_mask.src_ip[IPV4_INDEX] =
5690 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5691
5692 rule->tuples.dst_ip[IPV4_INDEX] =
5693 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5694 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5695 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5696
5697 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5698 rule->tuples_mask.src_port =
5699 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5700
5701 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5702 rule->tuples_mask.dst_port =
5703 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5704
5705 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5706 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5707
5708 rule->tuples.ether_proto = ETH_P_IP;
5709 rule->tuples_mask.ether_proto = 0xFFFF;
5710
5711 break;
5712 case IP_USER_FLOW:
5713 rule->tuples.src_ip[IPV4_INDEX] =
5714 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5715 rule->tuples_mask.src_ip[IPV4_INDEX] =
5716 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5717
5718 rule->tuples.dst_ip[IPV4_INDEX] =
5719 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5720 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5721 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5722
5723 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5724 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5725
5726 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5727 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5728
5729 rule->tuples.ether_proto = ETH_P_IP;
5730 rule->tuples_mask.ether_proto = 0xFFFF;
5731
5732 break;
5733 case SCTP_V6_FLOW:
5734 case TCP_V6_FLOW:
5735 case UDP_V6_FLOW:
5736 be32_to_cpu_array(rule->tuples.src_ip,
5737 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5738 be32_to_cpu_array(rule->tuples_mask.src_ip,
5739 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5740
5741 be32_to_cpu_array(rule->tuples.dst_ip,
5742 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5743 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5744 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5745
5746 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5747 rule->tuples_mask.src_port =
5748 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5749
5750 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5751 rule->tuples_mask.dst_port =
5752 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5753
5754 rule->tuples.ether_proto = ETH_P_IPV6;
5755 rule->tuples_mask.ether_proto = 0xFFFF;
5756
5757 break;
5758 case IPV6_USER_FLOW:
5759 be32_to_cpu_array(rule->tuples.src_ip,
5760 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5761 be32_to_cpu_array(rule->tuples_mask.src_ip,
5762 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5763
5764 be32_to_cpu_array(rule->tuples.dst_ip,
5765 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5766 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5767 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5768
5769 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5770 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5771
5772 rule->tuples.ether_proto = ETH_P_IPV6;
5773 rule->tuples_mask.ether_proto = 0xFFFF;
5774
5775 break;
5776 case ETHER_FLOW:
5777 ether_addr_copy(rule->tuples.src_mac,
5778 fs->h_u.ether_spec.h_source);
5779 ether_addr_copy(rule->tuples_mask.src_mac,
5780 fs->m_u.ether_spec.h_source);
5781
5782 ether_addr_copy(rule->tuples.dst_mac,
5783 fs->h_u.ether_spec.h_dest);
5784 ether_addr_copy(rule->tuples_mask.dst_mac,
5785 fs->m_u.ether_spec.h_dest);
5786
5787 rule->tuples.ether_proto =
5788 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5789 rule->tuples_mask.ether_proto =
5790 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5791
5792 break;
5793 default:
5794 return -EOPNOTSUPP;
5795 }
5796
5797 switch (flow_type) {
5798 case SCTP_V4_FLOW:
5799 case SCTP_V6_FLOW:
5800 rule->tuples.ip_proto = IPPROTO_SCTP;
5801 rule->tuples_mask.ip_proto = 0xFF;
5802 break;
5803 case TCP_V4_FLOW:
5804 case TCP_V6_FLOW:
5805 rule->tuples.ip_proto = IPPROTO_TCP;
5806 rule->tuples_mask.ip_proto = 0xFF;
5807 break;
5808 case UDP_V4_FLOW:
5809 case UDP_V6_FLOW:
5810 rule->tuples.ip_proto = IPPROTO_UDP;
5811 rule->tuples_mask.ip_proto = 0xFF;
5812 break;
5813 default:
5814 break;
5815 }
5816
5817 if (fs->flow_type & FLOW_EXT) {
5818 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5819 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5820 }
5821
5822 if (fs->flow_type & FLOW_MAC_EXT) {
5823 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5824 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5825 }
5826
5827 return 0;
5828 }
5829
5830 /* the caller must hold fd_rule_lock */
5831 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5832 struct hclge_fd_rule *rule)
5833 {
5834 int ret;
5835
5836 if (!rule) {
5837 dev_err(&hdev->pdev->dev,
5838 "The flow director rule is NULL\n");
5839 return -EINVAL;
5840 }
5841
5842 	/* this cannot fail here, so there is no need to check the return value */
5843 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5844
5845 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5846 if (ret)
5847 goto clear_rule;
5848
5849 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5850 if (ret)
5851 goto clear_rule;
5852
5853 return 0;
5854
5855 clear_rule:
5856 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5857 return ret;
5858 }
5859
5860 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5861 struct ethtool_rxnfc *cmd)
5862 {
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5865 u16 dst_vport_id = 0, q_index = 0;
5866 struct ethtool_rx_flow_spec *fs;
5867 struct hclge_fd_rule *rule;
5868 u32 unused = 0;
5869 u8 action;
5870 int ret;
5871
5872 if (!hnae3_dev_fd_supported(hdev)) {
5873 dev_err(&hdev->pdev->dev,
5874 			"flow director is not supported\n");
5875 return -EOPNOTSUPP;
5876 }
5877
5878 if (!hdev->fd_en) {
5879 dev_err(&hdev->pdev->dev,
5880 "please enable flow director first\n");
5881 return -EOPNOTSUPP;
5882 }
5883
5884 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5885
5886 ret = hclge_fd_check_spec(hdev, fs, &unused);
5887 if (ret)
5888 return ret;
5889
5890 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5891 action = HCLGE_FD_ACTION_DROP_PACKET;
5892 } else {
5893 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5894 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5895 u16 tqps;
5896
5897 if (vf > hdev->num_req_vfs) {
5898 dev_err(&hdev->pdev->dev,
5899 "Error: vf id (%u) > max vf num (%u)\n",
5900 vf, hdev->num_req_vfs);
5901 return -EINVAL;
5902 }
5903
5904 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5905 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5906
5907 if (ring >= tqps) {
5908 dev_err(&hdev->pdev->dev,
5909 "Error: queue id (%u) > max tqp num (%u)\n",
5910 ring, tqps - 1);
5911 return -EINVAL;
5912 }
5913
5914 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5915 q_index = ring;
5916 }
5917
5918 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5919 if (!rule)
5920 return -ENOMEM;
5921
5922 ret = hclge_fd_get_tuple(hdev, fs, rule);
5923 if (ret) {
5924 kfree(rule);
5925 return ret;
5926 }
5927
5928 rule->flow_type = fs->flow_type;
5929 rule->location = fs->location;
5930 rule->unused_tuple = unused;
5931 rule->vf_id = dst_vport_id;
5932 rule->queue_id = q_index;
5933 rule->action = action;
5934 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5935
5936 	/* to avoid rule conflicts, all existing aRFS rules need to be
5937 	 * cleared when the user configures a rule via ethtool
5938 	 */
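	/* Once this rule is installed, fd_active_type becomes
	 * HCLGE_FD_EP_ACTIVE, so hclge_add_fd_entry_by_arfs() will refuse
	 * to add aRFS rules until all ethtool rules are deleted again.
	 */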
5939 spin_lock_bh(&hdev->fd_rule_lock);
5940 hclge_clear_arfs_rules(handle);
5941
5942 ret = hclge_fd_config_rule(hdev, rule);
5943
5944 spin_unlock_bh(&hdev->fd_rule_lock);
5945
5946 return ret;
5947 }
5948
5949 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5950 struct ethtool_rxnfc *cmd)
5951 {
5952 struct hclge_vport *vport = hclge_get_vport(handle);
5953 struct hclge_dev *hdev = vport->back;
5954 struct ethtool_rx_flow_spec *fs;
5955 int ret;
5956
5957 if (!hnae3_dev_fd_supported(hdev))
5958 return -EOPNOTSUPP;
5959
5960 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5961
5962 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5963 return -EINVAL;
5964
5965 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5966 dev_err(&hdev->pdev->dev,
5967 			"failed to delete, rule %u does not exist\n", fs->location);
5968 return -ENOENT;
5969 }
5970
5971 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5972 NULL, false);
5973 if (ret)
5974 return ret;
5975
5976 spin_lock_bh(&hdev->fd_rule_lock);
5977 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5978
5979 spin_unlock_bh(&hdev->fd_rule_lock);
5980
5981 return ret;
5982 }
5983
5984 /* the caller must hold fd_rule_lock */
5985 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5986 bool clear_list)
5987 {
5988 struct hclge_vport *vport = hclge_get_vport(handle);
5989 struct hclge_dev *hdev = vport->back;
5990 struct hclge_fd_rule *rule;
5991 struct hlist_node *node;
5992 u16 location;
5993
5994 if (!hnae3_dev_fd_supported(hdev))
5995 return;
5996
5997 for_each_set_bit(location, hdev->fd_bmap,
5998 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5999 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6000 NULL, false);
6001
6002 if (clear_list) {
6003 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6004 rule_node) {
6005 hlist_del(&rule->rule_node);
6006 kfree(rule);
6007 }
6008 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6009 hdev->hclge_fd_rule_num = 0;
6010 bitmap_zero(hdev->fd_bmap,
6011 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6012 }
6013 }
6014
6015 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6016 {
6017 struct hclge_vport *vport = hclge_get_vport(handle);
6018 struct hclge_dev *hdev = vport->back;
6019 struct hclge_fd_rule *rule;
6020 struct hlist_node *node;
6021 int ret;
6022
6023 /* Return ok here, because reset error handling will check this
6024 * return value. If error is returned here, the reset process will
6025 * fail.
6026 */
6027 if (!hnae3_dev_fd_supported(hdev))
6028 return 0;
6029
6030 	/* if fd is disabled, the rules should not be restored during reset */
6031 if (!hdev->fd_en)
6032 return 0;
6033
6034 spin_lock_bh(&hdev->fd_rule_lock);
6035 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6036 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6037 if (!ret)
6038 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6039
6040 if (ret) {
6041 dev_warn(&hdev->pdev->dev,
6042 "Restore rule %u failed, remove it\n",
6043 rule->location);
6044 clear_bit(rule->location, hdev->fd_bmap);
6045 hlist_del(&rule->rule_node);
6046 kfree(rule);
6047 hdev->hclge_fd_rule_num--;
6048 }
6049 }
6050
6051 if (hdev->hclge_fd_rule_num)
6052 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6053
6054 spin_unlock_bh(&hdev->fd_rule_lock);
6055
6056 return 0;
6057 }
6058
6059 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6060 struct ethtool_rxnfc *cmd)
6061 {
6062 struct hclge_vport *vport = hclge_get_vport(handle);
6063 struct hclge_dev *hdev = vport->back;
6064
6065 if (!hnae3_dev_fd_supported(hdev))
6066 return -EOPNOTSUPP;
6067
6068 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6069 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070
6071 return 0;
6072 }
6073
6074 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6075 struct ethtool_tcpip4_spec *spec,
6076 struct ethtool_tcpip4_spec *spec_mask)
6077 {
6078 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6079 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6080 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6081
6082 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6083 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6084 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6085
6086 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6087 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6088 0 : cpu_to_be16(rule->tuples_mask.src_port);
6089
6090 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6091 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6092 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6093
6094 spec->tos = rule->tuples.ip_tos;
6095 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6096 0 : rule->tuples_mask.ip_tos;
6097 }
6098
6099 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6100 struct ethtool_usrip4_spec *spec,
6101 struct ethtool_usrip4_spec *spec_mask)
6102 {
6103 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6104 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6105 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6106
6107 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6108 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6109 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6110
6111 spec->tos = rule->tuples.ip_tos;
6112 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6113 0 : rule->tuples_mask.ip_tos;
6114
6115 spec->proto = rule->tuples.ip_proto;
6116 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6117 0 : rule->tuples_mask.ip_proto;
6118
6119 spec->ip_ver = ETH_RX_NFC_IP4;
6120 }
6121
6122 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6123 struct ethtool_tcpip6_spec *spec,
6124 struct ethtool_tcpip6_spec *spec_mask)
6125 {
6126 cpu_to_be32_array(spec->ip6src,
6127 rule->tuples.src_ip, IPV6_SIZE);
6128 cpu_to_be32_array(spec->ip6dst,
6129 rule->tuples.dst_ip, IPV6_SIZE);
6130 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6131 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6132 else
6133 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6134 IPV6_SIZE);
6135
6136 if (rule->unused_tuple & BIT(INNER_DST_IP))
6137 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6138 else
6139 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6140 IPV6_SIZE);
6141
6142 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6143 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6144 0 : cpu_to_be16(rule->tuples_mask.src_port);
6145
6146 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6147 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6148 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6149 }
6150
6151 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6152 struct ethtool_usrip6_spec *spec,
6153 struct ethtool_usrip6_spec *spec_mask)
6154 {
6155 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6156 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6157 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6158 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6159 else
6160 cpu_to_be32_array(spec_mask->ip6src,
6161 rule->tuples_mask.src_ip, IPV6_SIZE);
6162
6163 if (rule->unused_tuple & BIT(INNER_DST_IP))
6164 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6165 else
6166 cpu_to_be32_array(spec_mask->ip6dst,
6167 rule->tuples_mask.dst_ip, IPV6_SIZE);
6168
6169 spec->l4_proto = rule->tuples.ip_proto;
6170 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6171 0 : rule->tuples_mask.ip_proto;
6172 }
6173
6174 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6175 struct ethhdr *spec,
6176 struct ethhdr *spec_mask)
6177 {
6178 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6179 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6180
6181 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6182 eth_zero_addr(spec_mask->h_source);
6183 else
6184 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6185
6186 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6187 eth_zero_addr(spec_mask->h_dest);
6188 else
6189 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6190
6191 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6192 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6193 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6194 }
6195
6196 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6197 struct hclge_fd_rule *rule)
6198 {
6199 if (fs->flow_type & FLOW_EXT) {
6200 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6201 fs->m_ext.vlan_tci =
6202 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6203 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6204 }
6205
6206 if (fs->flow_type & FLOW_MAC_EXT) {
6207 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6208 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6209 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6210 else
6211 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6212 rule->tuples_mask.dst_mac);
6213 }
6214 }
6215
6216 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6217 struct ethtool_rxnfc *cmd)
6218 {
6219 struct hclge_vport *vport = hclge_get_vport(handle);
6220 struct hclge_fd_rule *rule = NULL;
6221 struct hclge_dev *hdev = vport->back;
6222 struct ethtool_rx_flow_spec *fs;
6223 struct hlist_node *node2;
6224
6225 if (!hnae3_dev_fd_supported(hdev))
6226 return -EOPNOTSUPP;
6227
6228 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6229
6230 spin_lock_bh(&hdev->fd_rule_lock);
6231
6232 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6233 if (rule->location >= fs->location)
6234 break;
6235 }
6236
6237 if (!rule || fs->location != rule->location) {
6238 spin_unlock_bh(&hdev->fd_rule_lock);
6239
6240 return -ENOENT;
6241 }
6242
6243 fs->flow_type = rule->flow_type;
6244 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6245 case SCTP_V4_FLOW:
6246 case TCP_V4_FLOW:
6247 case UDP_V4_FLOW:
6248 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6249 &fs->m_u.tcp_ip4_spec);
6250 break;
6251 case IP_USER_FLOW:
6252 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6253 &fs->m_u.usr_ip4_spec);
6254 break;
6255 case SCTP_V6_FLOW:
6256 case TCP_V6_FLOW:
6257 case UDP_V6_FLOW:
6258 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6259 &fs->m_u.tcp_ip6_spec);
6260 break;
6261 case IPV6_USER_FLOW:
6262 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6263 &fs->m_u.usr_ip6_spec);
6264 break;
6265 	/* The flow type of the fd rule has been checked before it was added to
6266 	 * the rule list. As all other flow types have been handled above, the
6267 	 * default case must be ETHER_FLOW.
6268 	 */
6269 default:
6270 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6271 &fs->m_u.ether_spec);
6272 break;
6273 }
6274
6275 hclge_fd_get_ext_info(fs, rule);
6276
6277 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6278 fs->ring_cookie = RX_CLS_FLOW_DISC;
6279 } else {
6280 u64 vf_id;
6281
6282 fs->ring_cookie = rule->queue_id;
6283 vf_id = rule->vf_id;
6284 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6285 fs->ring_cookie |= vf_id;
6286 }
6287
6288 spin_unlock_bh(&hdev->fd_rule_lock);
6289
6290 return 0;
6291 }
6292
6293 static int hclge_get_all_rules(struct hnae3_handle *handle,
6294 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6295 {
6296 struct hclge_vport *vport = hclge_get_vport(handle);
6297 struct hclge_dev *hdev = vport->back;
6298 struct hclge_fd_rule *rule;
6299 struct hlist_node *node2;
6300 int cnt = 0;
6301
6302 if (!hnae3_dev_fd_supported(hdev))
6303 return -EOPNOTSUPP;
6304
6305 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6306
6307 spin_lock_bh(&hdev->fd_rule_lock);
6308 hlist_for_each_entry_safe(rule, node2,
6309 &hdev->fd_rule_list, rule_node) {
6310 if (cnt == cmd->rule_cnt) {
6311 spin_unlock_bh(&hdev->fd_rule_lock);
6312 return -EMSGSIZE;
6313 }
6314
6315 rule_locs[cnt] = rule->location;
6316 cnt++;
6317 }
6318
6319 spin_unlock_bh(&hdev->fd_rule_lock);
6320
6321 cmd->rule_cnt = cnt;
6322
6323 return 0;
6324 }
6325
6326 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6327 struct hclge_fd_rule_tuples *tuples)
6328 {
6329 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6330 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6331
6332 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6333 tuples->ip_proto = fkeys->basic.ip_proto;
6334 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6335
6336 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6337 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6338 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6339 } else {
6340 int i;
6341
6342 for (i = 0; i < IPV6_SIZE; i++) {
6343 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6344 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6345 }
6346 }
6347 }
6348
6349 /* traverse all rules and check whether an existing rule has the same tuples */
6350 static struct hclge_fd_rule *
6351 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6352 const struct hclge_fd_rule_tuples *tuples)
6353 {
6354 struct hclge_fd_rule *rule = NULL;
6355 struct hlist_node *node;
6356
6357 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6358 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6359 return rule;
6360 }
6361
6362 return NULL;
6363 }
6364
6365 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6366 struct hclge_fd_rule *rule)
6367 {
6368 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6369 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6370 BIT(INNER_SRC_PORT);
6371 rule->action = 0;
6372 rule->vf_id = 0;
6373 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6374 if (tuples->ether_proto == ETH_P_IP) {
6375 if (tuples->ip_proto == IPPROTO_TCP)
6376 rule->flow_type = TCP_V4_FLOW;
6377 else
6378 rule->flow_type = UDP_V4_FLOW;
6379 } else {
6380 if (tuples->ip_proto == IPPROTO_TCP)
6381 rule->flow_type = TCP_V6_FLOW;
6382 else
6383 rule->flow_type = UDP_V6_FLOW;
6384 }
6385 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6386 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6387 }
6388
6389 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6390 u16 flow_id, struct flow_keys *fkeys)
6391 {
6392 struct hclge_vport *vport = hclge_get_vport(handle);
6393 struct hclge_fd_rule_tuples new_tuples = {};
6394 struct hclge_dev *hdev = vport->back;
6395 struct hclge_fd_rule *rule;
6396 u16 tmp_queue_id;
6397 u16 bit_id;
6398 int ret;
6399
6400 if (!hnae3_dev_fd_supported(hdev))
6401 return -EOPNOTSUPP;
6402
6403 	/* aRFS should not work when an fd rule has already been added by
6404 	 * the user
6405 	 */
6406 spin_lock_bh(&hdev->fd_rule_lock);
6407 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6408 spin_unlock_bh(&hdev->fd_rule_lock);
6409 return -EOPNOTSUPP;
6410 }
6411
6412 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6413
6414 	/* check whether a flow director filter already exists for this flow:
6415 	 * if not, create a new filter for it;
6416 	 * if a filter exists with a different queue id, modify the filter;
6417 	 * if a filter exists with the same queue id, do nothing
6418 	 */
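	/* For example, if a TCP/IPv4 flow that was steered to queue 3 is
	 * rescheduled by RFS to queue 7, only the action (queue id) of the
	 * existing rule is rewritten below; the tcam key stays untouched.
	 */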
6419 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6420 if (!rule) {
6421 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6422 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6423 spin_unlock_bh(&hdev->fd_rule_lock);
6424 return -ENOSPC;
6425 }
6426
6427 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6428 if (!rule) {
6429 spin_unlock_bh(&hdev->fd_rule_lock);
6430 return -ENOMEM;
6431 }
6432
6433 set_bit(bit_id, hdev->fd_bmap);
6434 rule->location = bit_id;
6435 rule->flow_id = flow_id;
6436 rule->queue_id = queue_id;
6437 hclge_fd_build_arfs_rule(&new_tuples, rule);
6438 ret = hclge_fd_config_rule(hdev, rule);
6439
6440 spin_unlock_bh(&hdev->fd_rule_lock);
6441
6442 if (ret)
6443 return ret;
6444
6445 return rule->location;
6446 }
6447
6448 spin_unlock_bh(&hdev->fd_rule_lock);
6449
6450 if (rule->queue_id == queue_id)
6451 return rule->location;
6452
6453 tmp_queue_id = rule->queue_id;
6454 rule->queue_id = queue_id;
6455 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6456 if (ret) {
6457 rule->queue_id = tmp_queue_id;
6458 return ret;
6459 }
6460
6461 return rule->location;
6462 }
6463
6464 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6465 {
6466 #ifdef CONFIG_RFS_ACCEL
6467 struct hnae3_handle *handle = &hdev->vport[0].nic;
6468 struct hclge_fd_rule *rule;
6469 struct hlist_node *node;
6470 HLIST_HEAD(del_list);
6471
6472 spin_lock_bh(&hdev->fd_rule_lock);
6473 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6474 spin_unlock_bh(&hdev->fd_rule_lock);
6475 return;
6476 }
6477 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6478 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6479 rule->flow_id, rule->location)) {
6480 hlist_del_init(&rule->rule_node);
6481 hlist_add_head(&rule->rule_node, &del_list);
6482 hdev->hclge_fd_rule_num--;
6483 clear_bit(rule->location, hdev->fd_bmap);
6484 }
6485 }
6486 spin_unlock_bh(&hdev->fd_rule_lock);
6487
6488 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6489 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6490 rule->location, NULL, false);
6491 kfree(rule);
6492 }
6493 #endif
6494 }
6495
6496 /* the caller must hold fd_rule_lock */
6497 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6498 {
6499 #ifdef CONFIG_RFS_ACCEL
6500 struct hclge_vport *vport = hclge_get_vport(handle);
6501 struct hclge_dev *hdev = vport->back;
6502
6503 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6504 hclge_del_all_fd_entries(handle, true);
6505 #endif
6506 }
6507
6508 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6509 {
6510 struct hclge_vport *vport = hclge_get_vport(handle);
6511 struct hclge_dev *hdev = vport->back;
6512
6513 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6514 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6515 }
6516
6517 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6518 {
6519 struct hclge_vport *vport = hclge_get_vport(handle);
6520 struct hclge_dev *hdev = vport->back;
6521
6522 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6523 }
6524
6525 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6526 {
6527 struct hclge_vport *vport = hclge_get_vport(handle);
6528 struct hclge_dev *hdev = vport->back;
6529
6530 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6531 }
6532
6533 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6534 {
6535 struct hclge_vport *vport = hclge_get_vport(handle);
6536 struct hclge_dev *hdev = vport->back;
6537
6538 return hdev->rst_stats.hw_reset_done_cnt;
6539 }
6540
6541 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6542 {
6543 struct hclge_vport *vport = hclge_get_vport(handle);
6544 struct hclge_dev *hdev = vport->back;
6545 bool clear;
6546
6547 hdev->fd_en = enable;
6548 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6549
6550 if (!enable) {
6551 spin_lock_bh(&hdev->fd_rule_lock);
6552 hclge_del_all_fd_entries(handle, clear);
6553 spin_unlock_bh(&hdev->fd_rule_lock);
6554 } else {
6555 hclge_restore_fd_entries(handle);
6556 }
6557 }
6558
6559 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6560 {
6561 struct hclge_desc desc;
6562 struct hclge_config_mac_mode_cmd *req =
6563 (struct hclge_config_mac_mode_cmd *)desc.data;
6564 u32 loop_en = 0;
6565 int ret;
6566
6567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6568
6569 if (enable) {
6570 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6571 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6572 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6573 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6574 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6575 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6576 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6577 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6578 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6579 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6580 }
6581
6582 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6583
6584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6585 if (ret)
6586 dev_err(&hdev->pdev->dev,
6587 "mac enable fail, ret =%d.\n", ret);
6588 }
6589
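/* Update the mac vlan switch parameter of the given function with a
 * read-modify-write sequence: read the current value, apply switch_param
 * under param_mask, then write the result back.
 */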
6590 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6591 u8 switch_param, u8 param_mask)
6592 {
6593 struct hclge_mac_vlan_switch_cmd *req;
6594 struct hclge_desc desc;
6595 u32 func_id;
6596 int ret;
6597
6598 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6599 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6600
6601 /* read current config parameter */
6602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6603 true);
6604 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6605 req->func_id = cpu_to_le32(func_id);
6606
6607 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6608 if (ret) {
6609 dev_err(&hdev->pdev->dev,
6610 "read mac vlan switch parameter fail, ret = %d\n", ret);
6611 return ret;
6612 }
6613
6614 /* modify and write new config parameter */
6615 hclge_cmd_reuse_desc(&desc, false);
6616 req->switch_param = (req->switch_param & param_mask) | switch_param;
6617 req->param_mask = param_mask;
6618
6619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6620 if (ret)
6621 dev_err(&hdev->pdev->dev,
6622 "set mac vlan switch parameter fail, ret = %d\n", ret);
6623 return ret;
6624 }
6625
6626 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6627 int link_ret)
6628 {
6629 #define HCLGE_PHY_LINK_STATUS_NUM 200
6630
6631 struct phy_device *phydev = hdev->hw.mac.phydev;
6632 int i = 0;
6633 int ret;
6634
6635 do {
6636 ret = phy_read_status(phydev);
6637 if (ret) {
6638 dev_err(&hdev->pdev->dev,
6639 "phy update link status fail, ret = %d\n", ret);
6640 return;
6641 }
6642
6643 if (phydev->link == link_ret)
6644 break;
6645
6646 msleep(HCLGE_LINK_STATUS_MS);
6647 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6648 }
6649
6650 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6651 {
6652 #define HCLGE_MAC_LINK_STATUS_NUM 100
6653
6654 int link_status;
6655 int i = 0;
6656 int ret;
6657
6658 do {
6659 ret = hclge_get_mac_link_status(hdev, &link_status);
6660 if (ret)
6661 return ret;
6662 if (link_status == link_ret)
6663 return 0;
6664
6665 msleep(HCLGE_LINK_STATUS_MS);
6666 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6667 return -EBUSY;
6668 }
6669
6670 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6671 bool is_phy)
6672 {
6673 int link_ret;
6674
6675 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6676
6677 if (is_phy)
6678 hclge_phy_link_status_wait(hdev, link_ret);
6679
6680 return hclge_mac_link_status_wait(hdev, link_ret);
6681 }
6682
6683 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6684 {
6685 struct hclge_config_mac_mode_cmd *req;
6686 struct hclge_desc desc;
6687 u32 loop_en;
6688 int ret;
6689
6690 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6691 /* 1 Read out the MAC mode config at first */
6692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6693 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6694 if (ret) {
6695 dev_err(&hdev->pdev->dev,
6696 "mac loopback get fail, ret =%d.\n", ret);
6697 return ret;
6698 }
6699
6700 /* 2 Then setup the loopback flag */
6701 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6702 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6703
6704 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6705
6706 /* 3 Config mac work mode with loopback flag
6707 * and its original configure parameters
6708 */
6709 hclge_cmd_reuse_desc(&desc, false);
6710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6711 if (ret)
6712 dev_err(&hdev->pdev->dev,
6713 "mac loopback set fail, ret =%d.\n", ret);
6714 return ret;
6715 }
6716
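/* Configure serdes (serial or parallel) loopback: send the set command,
 * then poll the loopback result up to HCLGE_SERDES_RETRY_NUM times, with
 * HCLGE_SERDES_RETRY_MS between reads, until the firmware reports done.
 */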
6717 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6718 enum hnae3_loop loop_mode)
6719 {
6720 #define HCLGE_SERDES_RETRY_MS 10
6721 #define HCLGE_SERDES_RETRY_NUM 100
6722
6723 struct hclge_serdes_lb_cmd *req;
6724 struct hclge_desc desc;
6725 int ret, i = 0;
6726 u8 loop_mode_b;
6727
6728 req = (struct hclge_serdes_lb_cmd *)desc.data;
6729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6730
6731 switch (loop_mode) {
6732 case HNAE3_LOOP_SERIAL_SERDES:
6733 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6734 break;
6735 case HNAE3_LOOP_PARALLEL_SERDES:
6736 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6737 break;
6738 default:
6739 dev_err(&hdev->pdev->dev,
6740 "unsupported serdes loopback mode %d\n", loop_mode);
6741 return -ENOTSUPP;
6742 }
6743
6744 if (en) {
6745 req->enable = loop_mode_b;
6746 req->mask = loop_mode_b;
6747 } else {
6748 req->mask = loop_mode_b;
6749 }
6750
6751 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6752 if (ret) {
6753 dev_err(&hdev->pdev->dev,
6754 "serdes loopback set fail, ret = %d\n", ret);
6755 return ret;
6756 }
6757
6758 do {
6759 msleep(HCLGE_SERDES_RETRY_MS);
6760 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6761 true);
6762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6763 if (ret) {
6764 dev_err(&hdev->pdev->dev,
6765 				"serdes loopback get fail, ret = %d\n", ret);
6766 return ret;
6767 }
6768 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6769 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6770
6771 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6772 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6773 return -EBUSY;
6774 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6775 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6776 return -EIO;
6777 }
6778 return ret;
6779 }
6780
6781 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6782 enum hnae3_loop loop_mode)
6783 {
6784 int ret;
6785
6786 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6787 if (ret)
6788 return ret;
6789
6790 hclge_cfg_mac_mode(hdev, en);
6791
6792 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6793 if (ret)
6794 dev_err(&hdev->pdev->dev,
6795 "serdes loopback config mac mode timeout\n");
6796
6797 return ret;
6798 }
6799
6800 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6801 struct phy_device *phydev)
6802 {
6803 int ret;
6804
6805 if (!phydev->suspended) {
6806 ret = phy_suspend(phydev);
6807 if (ret)
6808 return ret;
6809 }
6810
6811 ret = phy_resume(phydev);
6812 if (ret)
6813 return ret;
6814
6815 return phy_loopback(phydev, true);
6816 }
6817
6818 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6819 struct phy_device *phydev)
6820 {
6821 int ret;
6822
6823 ret = phy_loopback(phydev, false);
6824 if (ret)
6825 return ret;
6826
6827 return phy_suspend(phydev);
6828 }
6829
6830 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6831 {
6832 struct phy_device *phydev = hdev->hw.mac.phydev;
6833 int ret;
6834
6835 if (!phydev)
6836 return -ENOTSUPP;
6837
6838 if (en)
6839 ret = hclge_enable_phy_loopback(hdev, phydev);
6840 else
6841 ret = hclge_disable_phy_loopback(hdev, phydev);
6842 if (ret) {
6843 dev_err(&hdev->pdev->dev,
6844 "set phy loopback fail, ret = %d\n", ret);
6845 return ret;
6846 }
6847
6848 hclge_cfg_mac_mode(hdev, en);
6849
6850 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6851 if (ret)
6852 dev_err(&hdev->pdev->dev,
6853 "phy loopback config mac mode timeout\n");
6854
6855 return ret;
6856 }
6857
6858 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6859 int stream_id, bool enable)
6860 {
6861 struct hclge_desc desc;
6862 struct hclge_cfg_com_tqp_queue_cmd *req =
6863 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6864 int ret;
6865
6866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6867 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6868 req->stream_id = cpu_to_le16(stream_id);
6869 if (enable)
6870 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6871
6872 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6873 if (ret)
6874 dev_err(&hdev->pdev->dev,
6875 "Tqp enable fail, status =%d.\n", ret);
6876 return ret;
6877 }
6878
6879 static int hclge_set_loopback(struct hnae3_handle *handle,
6880 enum hnae3_loop loop_mode, bool en)
6881 {
6882 struct hclge_vport *vport = hclge_get_vport(handle);
6883 struct hnae3_knic_private_info *kinfo;
6884 struct hclge_dev *hdev = vport->back;
6885 int i, ret;
6886
6887 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6888 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6889 * the same, the packets are looped back in the SSU. If SSU loopback
6890 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6891 */
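	/* A rough sketch of the assumed datapath and loopback points:
	 *   host queues <-> SSU <-> MAC <-> serdes <-> PHY <-> wire
	 * APP loopback turns traffic around at the MAC, serdes loopback at
	 * the serdes lanes, and PHY loopback at the PHY.
	 */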
6892 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6893 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6894
6895 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6896 HCLGE_SWITCH_ALW_LPBK_MASK);
6897 if (ret)
6898 return ret;
6899 }
6900
6901 switch (loop_mode) {
6902 case HNAE3_LOOP_APP:
6903 ret = hclge_set_app_loopback(hdev, en);
6904 break;
6905 case HNAE3_LOOP_SERIAL_SERDES:
6906 case HNAE3_LOOP_PARALLEL_SERDES:
6907 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6908 break;
6909 case HNAE3_LOOP_PHY:
6910 ret = hclge_set_phy_loopback(hdev, en);
6911 break;
6912 default:
6913 ret = -ENOTSUPP;
6914 dev_err(&hdev->pdev->dev,
6915 "loop_mode %d is not supported\n", loop_mode);
6916 break;
6917 }
6918
6919 if (ret)
6920 return ret;
6921
6922 kinfo = &vport->nic.kinfo;
6923 for (i = 0; i < kinfo->num_tqps; i++) {
6924 ret = hclge_tqp_enable(hdev, i, 0, en);
6925 if (ret)
6926 return ret;
6927 }
6928
6929 return 0;
6930 }
6931
6932 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6933 {
6934 int ret;
6935
6936 ret = hclge_set_app_loopback(hdev, false);
6937 if (ret)
6938 return ret;
6939
6940 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6941 if (ret)
6942 return ret;
6943
6944 return hclge_cfg_serdes_loopback(hdev, false,
6945 HNAE3_LOOP_PARALLEL_SERDES);
6946 }
6947
6948 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6949 {
6950 struct hclge_vport *vport = hclge_get_vport(handle);
6951 struct hnae3_knic_private_info *kinfo;
6952 struct hnae3_queue *queue;
6953 struct hclge_tqp *tqp;
6954 int i;
6955
6956 kinfo = &vport->nic.kinfo;
6957 for (i = 0; i < kinfo->num_tqps; i++) {
6958 queue = handle->kinfo.tqp[i];
6959 tqp = container_of(queue, struct hclge_tqp, q);
6960 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6961 }
6962 }
6963
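/* Wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations) for a link update
 * currently running in the service task to finish, i.e. until
 * HCLGE_STATE_LINK_UPDATING is cleared or the service task makes progress.
 */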
6964 static void hclge_flush_link_update(struct hclge_dev *hdev)
6965 {
6966 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6967
6968 unsigned long last = hdev->serv_processed_cnt;
6969 int i = 0;
6970
6971 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6972 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6973 last == hdev->serv_processed_cnt)
6974 usleep_range(1, 1);
6975 }
6976
6977 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6978 {
6979 struct hclge_vport *vport = hclge_get_vport(handle);
6980 struct hclge_dev *hdev = vport->back;
6981
6982 if (enable) {
6983 hclge_task_schedule(hdev, 0);
6984 } else {
6985 /* Set the DOWN flag here to disable link updating */
6986 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6987
6988 /* flush memory to make sure DOWN is seen by service task */
6989 smp_mb__before_atomic();
6990 hclge_flush_link_update(hdev);
6991 }
6992 }
6993
6994 static int hclge_ae_start(struct hnae3_handle *handle)
6995 {
6996 struct hclge_vport *vport = hclge_get_vport(handle);
6997 struct hclge_dev *hdev = vport->back;
6998
6999 /* mac enable */
7000 hclge_cfg_mac_mode(hdev, true);
7001 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7002 hdev->hw.mac.link = 0;
7003
7004 /* reset tqp stats */
7005 hclge_reset_tqp_stats(handle);
7006
7007 hclge_mac_start_phy(hdev);
7008
7009 return 0;
7010 }
7011
7012 static void hclge_ae_stop(struct hnae3_handle *handle)
7013 {
7014 struct hclge_vport *vport = hclge_get_vport(handle);
7015 struct hclge_dev *hdev = vport->back;
7016 int i;
7017
7018 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7019 spin_lock_bh(&hdev->fd_rule_lock);
7020 hclge_clear_arfs_rules(handle);
7021 spin_unlock_bh(&hdev->fd_rule_lock);
7022
7023 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
7024 * so we only need to stop the PHY here.
7025 */
7026 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
7027 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
7028 HCLGE_PFC_DISABLE);
7029 if (hdev->reset_type != HNAE3_FUNC_RESET &&
7030 hdev->reset_type != HNAE3_FLR_RESET) {
7031 hclge_mac_stop_phy(hdev);
7032 hclge_update_link_status(hdev);
7033 return;
7034 }
7035 }
7036
7037 for (i = 0; i < handle->kinfo.num_tqps; i++)
7038 hclge_reset_tqp(handle, i);
7039
7040 hclge_config_mac_tnl_int(hdev, false);
7041
7042 /* Mac disable */
7043 hclge_cfg_mac_mode(hdev, false);
7044
7045 hclge_mac_stop_phy(hdev);
7046
7047 /* reset tqp stats */
7048 hclge_reset_tqp_stats(handle);
7049 hclge_update_link_status(hdev);
7050 }
7051
7052 int hclge_vport_start(struct hclge_vport *vport)
7053 {
7054 struct hclge_dev *hdev = vport->back;
7055
7056 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7057 vport->last_active_jiffies = jiffies;
7058
7059 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7060 if (vport->vport_id) {
7061 hclge_restore_mac_table_common(vport);
7062 hclge_restore_vport_vlan_table(vport);
7063 } else {
7064 hclge_restore_hw_table(hdev);
7065 }
7066 }
7067
7068 clear_bit(vport->vport_id, hdev->vport_config_block);
7069
7070 return 0;
7071 }
7072
7073 void hclge_vport_stop(struct hclge_vport *vport)
7074 {
7075 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7076 }
7077
7078 static int hclge_client_start(struct hnae3_handle *handle)
7079 {
7080 struct hclge_vport *vport = hclge_get_vport(handle);
7081
7082 return hclge_vport_start(vport);
7083 }
7084
7085 static void hclge_client_stop(struct hnae3_handle *handle)
7086 {
7087 struct hclge_vport *vport = hclge_get_vport(handle);
7088
7089 hclge_vport_stop(vport);
7090 }
7091
7092 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7093 u16 cmdq_resp, u8 resp_code,
7094 enum hclge_mac_vlan_tbl_opcode op)
7095 {
7096 struct hclge_dev *hdev = vport->back;
7097
7098 if (cmdq_resp) {
7099 dev_err(&hdev->pdev->dev,
7100 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7101 cmdq_resp);
7102 return -EIO;
7103 }
7104
7105 if (op == HCLGE_MAC_VLAN_ADD) {
7106 if (!resp_code || resp_code == 1)
7107 return 0;
7108 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7109 resp_code == HCLGE_ADD_MC_OVERFLOW)
7110 return -ENOSPC;
7111
7112 dev_err(&hdev->pdev->dev,
7113 "add mac addr failed for undefined, code=%u.\n",
7114 resp_code);
7115 return -EIO;
7116 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7117 if (!resp_code) {
7118 return 0;
7119 } else if (resp_code == 1) {
7120 dev_dbg(&hdev->pdev->dev,
7121 "remove mac addr failed for miss.\n");
7122 return -ENOENT;
7123 }
7124
7125 dev_err(&hdev->pdev->dev,
7126 "remove mac addr failed for undefined, code=%u.\n",
7127 resp_code);
7128 return -EIO;
7129 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7130 if (!resp_code) {
7131 return 0;
7132 } else if (resp_code == 1) {
7133 dev_dbg(&hdev->pdev->dev,
7134 "lookup mac addr failed for miss.\n");
7135 return -ENOENT;
7136 }
7137
7138 dev_err(&hdev->pdev->dev,
7139 "lookup mac addr failed for undefined, code=%u.\n",
7140 resp_code);
7141 return -EIO;
7142 }
7143
7144 dev_err(&hdev->pdev->dev,
7145 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7146
7147 return -EINVAL;
7148 }
7149
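/* Set or clear (clr) the bit for vfid in the VF bitmap of a multicast
 * MAC-VLAN table entry. The bitmap spans descriptors: the first
 * HCLGE_VF_NUM_IN_FIRST_DESC bits live in desc[1], the rest in desc[2].
 */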
7150 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7151 {
7152 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7153
7154 unsigned int word_num;
7155 unsigned int bit_num;
7156
7157 if (vfid > 255 || vfid < 0)
7158 return -EIO;
7159
7160 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7161 word_num = vfid / 32;
7162 bit_num = vfid % 32;
7163 if (clr)
7164 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7165 else
7166 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7167 } else {
7168 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7169 bit_num = vfid % 32;
7170 if (clr)
7171 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7172 else
7173 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7174 }
7175
7176 return 0;
7177 }
7178
7179 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7180 {
7181 #define HCLGE_DESC_NUMBER 3
7182 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7183 int i, j;
7184
7185 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7186 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7187 if (desc[i].data[j])
7188 return false;
7189
7190 return true;
7191 }
7192
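/* Pack the 6-byte MAC address into the MAC-VLAN table entry: bytes 0-3
 * go into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, and the
 * multicast-specific entry bits are set when is_mc is true.
 */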
7193 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7194 const u8 *addr, bool is_mc)
7195 {
7196 const unsigned char *mac_addr = addr;
7197 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7198 (mac_addr[0]) | (mac_addr[1] << 8);
7199 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7200
7201 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7202 if (is_mc) {
7203 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7204 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7205 }
7206
7207 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7208 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7209 }
7210
7211 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7212 struct hclge_mac_vlan_tbl_entry_cmd *req)
7213 {
7214 struct hclge_dev *hdev = vport->back;
7215 struct hclge_desc desc;
7216 u8 resp_code;
7217 u16 retval;
7218 int ret;
7219
7220 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7221
7222 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7223
7224 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7225 if (ret) {
7226 dev_err(&hdev->pdev->dev,
7227 "del mac addr failed for cmd_send, ret =%d.\n",
7228 ret);
7229 return ret;
7230 }
7231 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7232 retval = le16_to_cpu(desc.retval);
7233
7234 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7235 HCLGE_MAC_VLAN_REMOVE);
7236 }
7237
7238 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7239 struct hclge_mac_vlan_tbl_entry_cmd *req,
7240 struct hclge_desc *desc,
7241 bool is_mc)
7242 {
7243 struct hclge_dev *hdev = vport->back;
7244 u8 resp_code;
7245 u16 retval;
7246 int ret;
7247
7248 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7249 if (is_mc) {
7250 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7251 memcpy(desc[0].data,
7252 req,
7253 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7254 hclge_cmd_setup_basic_desc(&desc[1],
7255 HCLGE_OPC_MAC_VLAN_ADD,
7256 true);
7257 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7258 hclge_cmd_setup_basic_desc(&desc[2],
7259 HCLGE_OPC_MAC_VLAN_ADD,
7260 true);
7261 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7262 } else {
7263 memcpy(desc[0].data,
7264 req,
7265 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7266 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7267 }
7268 if (ret) {
7269 dev_err(&hdev->pdev->dev,
7270 "lookup mac addr failed for cmd_send, ret =%d.\n",
7271 ret);
7272 return ret;
7273 }
7274 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7275 retval = le16_to_cpu(desc[0].retval);
7276
7277 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7278 HCLGE_MAC_VLAN_LKUP);
7279 }
7280
7281 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7282 struct hclge_mac_vlan_tbl_entry_cmd *req,
7283 struct hclge_desc *mc_desc)
7284 {
7285 struct hclge_dev *hdev = vport->back;
7286 int cfg_status;
7287 u8 resp_code;
7288 u16 retval;
7289 int ret;
7290
7291 if (!mc_desc) {
7292 struct hclge_desc desc;
7293
7294 hclge_cmd_setup_basic_desc(&desc,
7295 HCLGE_OPC_MAC_VLAN_ADD,
7296 false);
7297 memcpy(desc.data, req,
7298 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7299 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7300 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7301 retval = le16_to_cpu(desc.retval);
7302
7303 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7304 resp_code,
7305 HCLGE_MAC_VLAN_ADD);
7306 } else {
7307 hclge_cmd_reuse_desc(&mc_desc[0], false);
7308 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7309 hclge_cmd_reuse_desc(&mc_desc[1], false);
7310 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7311 hclge_cmd_reuse_desc(&mc_desc[2], false);
7312 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7313 memcpy(mc_desc[0].data, req,
7314 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7315 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7316 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7317 retval = le16_to_cpu(mc_desc[0].retval);
7318
7319 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7320 resp_code,
7321 HCLGE_MAC_VLAN_ADD);
7322 }
7323
7324 if (ret) {
7325 dev_err(&hdev->pdev->dev,
7326 "add mac addr failed for cmd_send, ret =%d.\n",
7327 ret);
7328 return ret;
7329 }
7330
7331 return cfg_status;
7332 }
7333
7334 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7335 u16 *allocated_size)
7336 {
7337 struct hclge_umv_spc_alc_cmd *req;
7338 struct hclge_desc desc;
7339 int ret;
7340
7341 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7343
7344 req->space_size = cpu_to_le32(space_size);
7345
7346 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7347 if (ret) {
7348 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7349 ret);
7350 return ret;
7351 }
7352
7353 *allocated_size = le32_to_cpu(desc.data[1]);
7354
7355 return 0;
7356 }
7357
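/* Request the wanted unicast MAC-VLAN (UMV) space from firmware and split
 * what was actually allocated into a private quota per vport plus a shared
 * pool that holds the remainder.
 */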
7358 static int hclge_init_umv_space(struct hclge_dev *hdev)
7359 {
7360 u16 allocated_size = 0;
7361 int ret;
7362
7363 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7364 if (ret)
7365 return ret;
7366
7367 if (allocated_size < hdev->wanted_umv_size)
7368 dev_warn(&hdev->pdev->dev,
7369 "failed to alloc umv space, want %u, get %u\n",
7370 hdev->wanted_umv_size, allocated_size);
7371
7372 hdev->max_umv_size = allocated_size;
7373 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7374 hdev->share_umv_size = hdev->priv_umv_size +
7375 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7376
7377 return 0;
7378 }
7379
7380 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7381 {
7382 struct hclge_vport *vport;
7383 int i;
7384
7385 for (i = 0; i < hdev->num_alloc_vport; i++) {
7386 vport = &hdev->vport[i];
7387 vport->used_umv_num = 0;
7388 }
7389
7390 mutex_lock(&hdev->vport_lock);
7391 hdev->share_umv_size = hdev->priv_umv_size +
7392 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7393 mutex_unlock(&hdev->vport_lock);
7394 }
7395
7396 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7397 {
7398 struct hclge_dev *hdev = vport->back;
7399 bool is_full;
7400
7401 if (need_lock)
7402 mutex_lock(&hdev->vport_lock);
7403
7404 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7405 hdev->share_umv_size == 0);
7406
7407 if (need_lock)
7408 mutex_unlock(&hdev->vport_lock);
7409
7410 return is_full;
7411 }
7412
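/* Account for one unicast entry being added (or freed, is_free) against the
 * vport's private UMV quota first and the shared pool after that. The
 * add/remove paths below call this with hdev->vport_lock held.
 */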
7413 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7414 {
7415 struct hclge_dev *hdev = vport->back;
7416
7417 if (is_free) {
7418 if (vport->used_umv_num > hdev->priv_umv_size)
7419 hdev->share_umv_size++;
7420
7421 if (vport->used_umv_num > 0)
7422 vport->used_umv_num--;
7423 } else {
7424 if (vport->used_umv_num >= hdev->priv_umv_size &&
7425 hdev->share_umv_size > 0)
7426 hdev->share_umv_size--;
7427 vport->used_umv_num++;
7428 }
7429 }
7430
7431 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7432 const u8 *mac_addr)
7433 {
7434 struct hclge_mac_node *mac_node, *tmp;
7435
7436 list_for_each_entry_safe(mac_node, tmp, list, node)
7437 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7438 return mac_node;
7439
7440 return NULL;
7441 }
7442
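/* Merge a newly requested state into an existing mac node. Nodes move
 * between TO_ADD, TO_DEL and ACTIVE; a TO_ADD node that receives a TO_DEL
 * request is simply dropped, since it was never written to hardware.
 */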
7443 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7444 enum HCLGE_MAC_NODE_STATE state)
7445 {
7446 switch (state) {
7447 /* from set_rx_mode or tmp_add_list */
7448 case HCLGE_MAC_TO_ADD:
7449 if (mac_node->state == HCLGE_MAC_TO_DEL)
7450 mac_node->state = HCLGE_MAC_ACTIVE;
7451 break;
7452 /* only from set_rx_mode */
7453 case HCLGE_MAC_TO_DEL:
7454 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7455 list_del(&mac_node->node);
7456 kfree(mac_node);
7457 } else {
7458 mac_node->state = HCLGE_MAC_TO_DEL;
7459 }
7460 break;
7461 /* only from tmp_add_list, the mac_node->state won't be
7462 * ACTIVE.
7463 */
7464 case HCLGE_MAC_ACTIVE:
7465 if (mac_node->state == HCLGE_MAC_TO_ADD)
7466 mac_node->state = HCLGE_MAC_ACTIVE;
7467
7468 break;
7469 }
7470 }
7471
7472 int hclge_update_mac_list(struct hclge_vport *vport,
7473 enum HCLGE_MAC_NODE_STATE state,
7474 enum HCLGE_MAC_ADDR_TYPE mac_type,
7475 const unsigned char *addr)
7476 {
7477 struct hclge_dev *hdev = vport->back;
7478 struct hclge_mac_node *mac_node;
7479 struct list_head *list;
7480
7481 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7482 &vport->uc_mac_list : &vport->mc_mac_list;
7483
7484 spin_lock_bh(&vport->mac_list_lock);
7485
7486 /* if the mac addr is already in the mac list, no need to add a new
7487 * one; just check the mac addr state and either convert it to a new
7488 * state, remove it, or do nothing.
7489 */
7490 mac_node = hclge_find_mac_node(list, addr);
7491 if (mac_node) {
7492 hclge_update_mac_node(mac_node, state);
7493 spin_unlock_bh(&vport->mac_list_lock);
7494 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7495 return 0;
7496 }
7497
7498 /* if this address has never been added, there is nothing to delete */
7499 if (state == HCLGE_MAC_TO_DEL) {
7500 spin_unlock_bh(&vport->mac_list_lock);
7501 dev_err(&hdev->pdev->dev,
7502 "failed to delete address %pM from mac list\n",
7503 addr);
7504 return -ENOENT;
7505 }
7506
7507 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7508 if (!mac_node) {
7509 spin_unlock_bh(&vport->mac_list_lock);
7510 return -ENOMEM;
7511 }
7512
7513 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7514
7515 mac_node->state = state;
7516 ether_addr_copy(mac_node->mac_addr, addr);
7517 list_add_tail(&mac_node->node, list);
7518
7519 spin_unlock_bh(&vport->mac_list_lock);
7520
7521 return 0;
7522 }
7523
7524 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7525 const unsigned char *addr)
7526 {
7527 struct hclge_vport *vport = hclge_get_vport(handle);
7528
7529 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7530 addr);
7531 }
7532
7533 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7534 const unsigned char *addr)
7535 {
7536 struct hclge_dev *hdev = vport->back;
7537 struct hclge_mac_vlan_tbl_entry_cmd req;
7538 struct hclge_desc desc;
7539 u16 egress_port = 0;
7540 int ret;
7541
7542 /* mac addr check */
7543 if (is_zero_ether_addr(addr) ||
7544 is_broadcast_ether_addr(addr) ||
7545 is_multicast_ether_addr(addr)) {
7546 dev_err(&hdev->pdev->dev,
7547 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7548 addr, is_zero_ether_addr(addr),
7549 is_broadcast_ether_addr(addr),
7550 is_multicast_ether_addr(addr));
7551 return -EINVAL;
7552 }
7553
7554 memset(&req, 0, sizeof(req));
7555
7556 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7557 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7558
7559 req.egress_port = cpu_to_le16(egress_port);
7560
7561 hclge_prepare_mac_addr(&req, addr, false);
7562
7563 /* Look up the mac address in the mac_vlan table, and add
7564 * it if the entry does not exist. Duplicate unicast entries
7565 * are not allowed in the mac vlan table.
7566 */
7567 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7568 if (ret == -ENOENT) {
7569 mutex_lock(&hdev->vport_lock);
7570 if (!hclge_is_umv_space_full(vport, false)) {
7571 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7572 if (!ret)
7573 hclge_update_umv_space(vport, false);
7574 mutex_unlock(&hdev->vport_lock);
7575 return ret;
7576 }
7577 mutex_unlock(&hdev->vport_lock);
7578
7579 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7580 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7581 hdev->priv_umv_size);
7582
7583 return -ENOSPC;
7584 }
7585
7586 /* check if we just hit the duplicate */
7587 if (!ret)
7588 return -EEXIST;
7589
7590 return ret;
7591 }
7592
7593 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7594 const unsigned char *addr)
7595 {
7596 struct hclge_vport *vport = hclge_get_vport(handle);
7597
7598 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7599 addr);
7600 }
7601
7602 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7603 const unsigned char *addr)
7604 {
7605 struct hclge_dev *hdev = vport->back;
7606 struct hclge_mac_vlan_tbl_entry_cmd req;
7607 int ret;
7608
7609 /* mac addr check */
7610 if (is_zero_ether_addr(addr) ||
7611 is_broadcast_ether_addr(addr) ||
7612 is_multicast_ether_addr(addr)) {
7613 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7614 addr);
7615 return -EINVAL;
7616 }
7617
7618 memset(&req, 0, sizeof(req));
7619 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7620 hclge_prepare_mac_addr(&req, addr, false);
7621 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7622 if (!ret || ret == -ENOENT) {
7623 mutex_lock(&hdev->vport_lock);
7624 hclge_update_umv_space(vport, true);
7625 mutex_unlock(&hdev->vport_lock);
7626 return 0;
7627 }
7628
7629 return ret;
7630 }
7631
7632 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7633 const unsigned char *addr)
7634 {
7635 struct hclge_vport *vport = hclge_get_vport(handle);
7636
7637 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7638 addr);
7639 }
7640
7641 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7642 const unsigned char *addr)
7643 {
7644 struct hclge_dev *hdev = vport->back;
7645 struct hclge_mac_vlan_tbl_entry_cmd req;
7646 struct hclge_desc desc[3];
7647 int status;
7648
7649 /* mac addr check */
7650 if (!is_multicast_ether_addr(addr)) {
7651 dev_err(&hdev->pdev->dev,
7652 "Add mc mac err! invalid mac:%pM.\n",
7653 addr);
7654 return -EINVAL;
7655 }
7656 memset(&req, 0, sizeof(req));
7657 hclge_prepare_mac_addr(&req, addr, true);
7658 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7659 if (status) {
7660 /* This mac addr does not exist, add a new entry for it */
7661 memset(desc[0].data, 0, sizeof(desc[0].data));
7662 memset(desc[1].data, 0, sizeof(desc[0].data));
7663 memset(desc[2].data, 0, sizeof(desc[0].data));
7664 }
7665 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7666 if (status)
7667 return status;
7668 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7669
7670 /* if the table has already overflowed, do not print every time */
7671 if (status == -ENOSPC &&
7672 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7673 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7674
7675 return status;
7676 }
7677
7678 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7679 const unsigned char *addr)
7680 {
7681 struct hclge_vport *vport = hclge_get_vport(handle);
7682
7683 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7684 addr);
7685 }
7686
7687 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7688 const unsigned char *addr)
7689 {
7690 struct hclge_dev *hdev = vport->back;
7691 struct hclge_mac_vlan_tbl_entry_cmd req;
7692 enum hclge_cmd_status status;
7693 struct hclge_desc desc[3];
7694
7695 /* mac addr check */
7696 if (!is_multicast_ether_addr(addr)) {
7697 dev_dbg(&hdev->pdev->dev,
7698 "Remove mc mac err! invalid mac:%pM.\n",
7699 addr);
7700 return -EINVAL;
7701 }
7702
7703 memset(&req, 0, sizeof(req));
7704 hclge_prepare_mac_addr(&req, addr, true);
7705 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7706 if (!status) {
7707 /* This mac addr exists, remove this handle's VFID from it */
7708 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7709 if (status)
7710 return status;
7711
7712 if (hclge_is_all_function_id_zero(desc))
7713 /* All the vfids are zero, so this entry needs to be deleted */
7714 status = hclge_remove_mac_vlan_tbl(vport, &req);
7715 else
7716 /* Not all the vfids are zero, so just update the vfid bitmap */
7717 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7718
7719 } else if (status == -ENOENT) {
7720 status = 0;
7721 }
7722
7723 return status;
7724 }
7725
7726 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7727 struct list_head *list,
7728 int (*sync)(struct hclge_vport *,
7729 const unsigned char *))
7730 {
7731 struct hclge_mac_node *mac_node, *tmp;
7732 int ret;
7733
7734 list_for_each_entry_safe(mac_node, tmp, list, node) {
7735 ret = sync(vport, mac_node->mac_addr);
7736 if (!ret) {
7737 mac_node->state = HCLGE_MAC_ACTIVE;
7738 } else {
7739 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7740 &vport->state);
7741
7742 /* If one unicast mac address already exists in hardware,
7743 * keep trying the remaining unicast mac addresses, since
7744 * they may be new addresses that can still be added.
7745 */
7746 if (ret != -EEXIST)
7747 break;
7748 }
7749 }
7750 }
7751
7752 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7753 struct list_head *list,
7754 int (*unsync)(struct hclge_vport *,
7755 const unsigned char *))
7756 {
7757 struct hclge_mac_node *mac_node, *tmp;
7758 int ret;
7759
7760 list_for_each_entry_safe(mac_node, tmp, list, node) {
7761 ret = unsync(vport, mac_node->mac_addr);
7762 if (!ret || ret == -ENOENT) {
7763 list_del(&mac_node->node);
7764 kfree(mac_node);
7765 } else {
7766 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7767 &vport->state);
7768 break;
7769 }
7770 }
7771 }
7772
7773 static bool hclge_sync_from_add_list(struct list_head *add_list,
7774 struct list_head *mac_list)
7775 {
7776 struct hclge_mac_node *mac_node, *tmp, *new_node;
7777 bool all_added = true;
7778
7779 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7780 if (mac_node->state == HCLGE_MAC_TO_ADD)
7781 all_added = false;
7782
7783 /* if the mac address from tmp_add_list is not in the
7784 * uc/mc_mac_list, it means a TO_DEL request was received
7785 * while this address was being added to the mac table.
7786 * If the mac_node state is ACTIVE, change it to TO_DEL so
7787 * it gets removed next time; otherwise it must be TO_ADD,
7788 * meaning the address was never added to the mac table,
7789 * so just remove the mac node.
7790 */
7791 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7792 if (new_node) {
7793 hclge_update_mac_node(new_node, mac_node->state);
7794 list_del(&mac_node->node);
7795 kfree(mac_node);
7796 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7797 mac_node->state = HCLGE_MAC_TO_DEL;
7798 list_del(&mac_node->node);
7799 list_add_tail(&mac_node->node, mac_list);
7800 } else {
7801 list_del(&mac_node->node);
7802 kfree(mac_node);
7803 }
7804 }
7805
7806 return all_added;
7807 }
7808
7809 static void hclge_sync_from_del_list(struct list_head *del_list,
7810 struct list_head *mac_list)
7811 {
7812 struct hclge_mac_node *mac_node, *tmp, *new_node;
7813
7814 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7815 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7816 if (new_node) {
7817 /* If the mac addr exists in the mac list, it means a
7818 * new TO_ADD request was received while this address
7819 * was being configured. The mac node state is TO_ADD,
7820 * and the address is still in the hardware (the delete
7821 * failed), so we just need to change the mac node
7822 * state to ACTIVE.
7823 */
7824 new_node->state = HCLGE_MAC_ACTIVE;
7825 list_del(&mac_node->node);
7826 kfree(mac_node);
7827 } else {
7828 list_del(&mac_node->node);
7829 list_add_tail(&mac_node->node, mac_list);
7830 }
7831 }
7832 }
7833
7834 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7835 enum HCLGE_MAC_ADDR_TYPE mac_type,
7836 bool is_all_added)
7837 {
7838 if (mac_type == HCLGE_MAC_ADDR_UC) {
7839 if (is_all_added)
7840 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7841 else
7842 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7843 } else {
7844 if (is_all_added)
7845 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7846 else
7847 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7848 }
7849 }
7850
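/* Push the pending changes of one vport's uc or mc mac list to hardware:
 * snapshot the TO_ADD/TO_DEL nodes into temporary lists under the lock,
 * issue the table commands without the lock, then merge the results back
 * and update the overflow promisc flags.
 */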
7851 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7852 enum HCLGE_MAC_ADDR_TYPE mac_type)
7853 {
7854 struct hclge_mac_node *mac_node, *tmp, *new_node;
7855 struct list_head tmp_add_list, tmp_del_list;
7856 struct list_head *list;
7857 bool all_added;
7858
7859 INIT_LIST_HEAD(&tmp_add_list);
7860 INIT_LIST_HEAD(&tmp_del_list);
7861
7862 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
7863 * that we can add/delete them outside the spin lock
7864 */
7865 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7866 &vport->uc_mac_list : &vport->mc_mac_list;
7867
7868 spin_lock_bh(&vport->mac_list_lock);
7869
7870 list_for_each_entry_safe(mac_node, tmp, list, node) {
7871 switch (mac_node->state) {
7872 case HCLGE_MAC_TO_DEL:
7873 list_del(&mac_node->node);
7874 list_add_tail(&mac_node->node, &tmp_del_list);
7875 break;
7876 case HCLGE_MAC_TO_ADD:
7877 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7878 if (!new_node)
7879 goto stop_traverse;
7880 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7881 new_node->state = mac_node->state;
7882 list_add_tail(&new_node->node, &tmp_add_list);
7883 break;
7884 default:
7885 break;
7886 }
7887 }
7888
7889 stop_traverse:
7890 spin_unlock_bh(&vport->mac_list_lock);
7891
7892 /* delete first, in order to get max mac table space for adding */
7893 if (mac_type == HCLGE_MAC_ADDR_UC) {
7894 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7895 hclge_rm_uc_addr_common);
7896 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7897 hclge_add_uc_addr_common);
7898 } else {
7899 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7900 hclge_rm_mc_addr_common);
7901 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7902 hclge_add_mc_addr_common);
7903 }
7904
7905 /* if adding or deleting some mac addresses failed, move them back
7906 * to the mac_list and retry next time.
7907 */
7908 spin_lock_bh(&vport->mac_list_lock);
7909
7910 hclge_sync_from_del_list(&tmp_del_list, list);
7911 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7912
7913 spin_unlock_bh(&vport->mac_list_lock);
7914
7915 hclge_update_overflow_flags(vport, mac_type, all_added);
7916 }
7917
7918 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7919 {
7920 struct hclge_dev *hdev = vport->back;
7921
7922 if (test_bit(vport->vport_id, hdev->vport_config_block))
7923 return false;
7924
7925 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7926 return true;
7927
7928 return false;
7929 }
7930
7931 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7932 {
7933 int i;
7934
7935 for (i = 0; i < hdev->num_alloc_vport; i++) {
7936 struct hclge_vport *vport = &hdev->vport[i];
7937
7938 if (!hclge_need_sync_mac_table(vport))
7939 continue;
7940
7941 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7942 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7943 }
7944 }
7945
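/* Remove all uc or mc mac addresses of a vport from hardware. When
 * is_del_list is false (e.g. around a VF reset) the entries are kept in
 * the list and marked TO_ADD so they can be restored later; when it is
 * true they are dropped from the list as well.
 */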
7946 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7947 enum HCLGE_MAC_ADDR_TYPE mac_type)
7948 {
7949 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7950 struct hclge_mac_node *mac_cfg, *tmp;
7951 struct hclge_dev *hdev = vport->back;
7952 struct list_head tmp_del_list, *list;
7953 int ret;
7954
7955 if (mac_type == HCLGE_MAC_ADDR_UC) {
7956 list = &vport->uc_mac_list;
7957 unsync = hclge_rm_uc_addr_common;
7958 } else {
7959 list = &vport->mc_mac_list;
7960 unsync = hclge_rm_mc_addr_common;
7961 }
7962
7963 INIT_LIST_HEAD(&tmp_del_list);
7964
7965 if (!is_del_list)
7966 set_bit(vport->vport_id, hdev->vport_config_block);
7967
7968 spin_lock_bh(&vport->mac_list_lock);
7969
7970 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7971 switch (mac_cfg->state) {
7972 case HCLGE_MAC_TO_DEL:
7973 case HCLGE_MAC_ACTIVE:
7974 list_del(&mac_cfg->node);
7975 list_add_tail(&mac_cfg->node, &tmp_del_list);
7976 break;
7977 case HCLGE_MAC_TO_ADD:
7978 if (is_del_list) {
7979 list_del(&mac_cfg->node);
7980 kfree(mac_cfg);
7981 }
7982 break;
7983 }
7984 }
7985
7986 spin_unlock_bh(&vport->mac_list_lock);
7987
7988 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7989 ret = unsync(vport, mac_cfg->mac_addr);
7990 if (!ret || ret == -ENOENT) {
7991 /* clear all mac addrs from hardware, but keep these
7992 * mac addrs in the mac list so they can be restored
7993 * after the vf reset has finished.
7994 */
7995 if (!is_del_list &&
7996 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7997 mac_cfg->state = HCLGE_MAC_TO_ADD;
7998 } else {
7999 list_del(&mac_cfg->node);
8000 kfree(mac_cfg);
8001 }
8002 } else if (is_del_list) {
8003 mac_cfg->state = HCLGE_MAC_TO_DEL;
8004 }
8005 }
8006
8007 spin_lock_bh(&vport->mac_list_lock);
8008
8009 hclge_sync_from_del_list(&tmp_del_list, list);
8010
8011 spin_unlock_bh(&vport->mac_list_lock);
8012 }
8013
8014 /* remove all mac addresses when uninitializing */
8015 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8016 enum HCLGE_MAC_ADDR_TYPE mac_type)
8017 {
8018 struct hclge_mac_node *mac_node, *tmp;
8019 struct hclge_dev *hdev = vport->back;
8020 struct list_head tmp_del_list, *list;
8021
8022 INIT_LIST_HEAD(&tmp_del_list);
8023
8024 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8025 &vport->uc_mac_list : &vport->mc_mac_list;
8026
8027 spin_lock_bh(&vport->mac_list_lock);
8028
8029 list_for_each_entry_safe(mac_node, tmp, list, node) {
8030 switch (mac_node->state) {
8031 case HCLGE_MAC_TO_DEL:
8032 case HCLGE_MAC_ACTIVE:
8033 list_del(&mac_node->node);
8034 list_add_tail(&mac_node->node, &tmp_del_list);
8035 break;
8036 case HCLGE_MAC_TO_ADD:
8037 list_del(&mac_node->node);
8038 kfree(mac_node);
8039 break;
8040 }
8041 }
8042
8043 spin_unlock_bh(&vport->mac_list_lock);
8044
8045 if (mac_type == HCLGE_MAC_ADDR_UC)
8046 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8047 hclge_rm_uc_addr_common);
8048 else
8049 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8050 hclge_rm_mc_addr_common);
8051
8052 if (!list_empty(&tmp_del_list))
8053 dev_warn(&hdev->pdev->dev,
8054 "uninit %s mac list for vport %u not completely.\n",
8055 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8056 vport->vport_id);
8057
8058 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8059 list_del(&mac_node->node);
8060 kfree(mac_node);
8061 }
8062 }
8063
8064 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8065 {
8066 struct hclge_vport *vport;
8067 int i;
8068
8069 for (i = 0; i < hdev->num_alloc_vport; i++) {
8070 vport = &hdev->vport[i];
8071 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8072 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8073 }
8074 }
8075
8076 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8077 u16 cmdq_resp, u8 resp_code)
8078 {
8079 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8080 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8081 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8082 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8083
8084 int return_status;
8085
8086 if (cmdq_resp) {
8087 dev_err(&hdev->pdev->dev,
8088 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8089 cmdq_resp);
8090 return -EIO;
8091 }
8092
8093 switch (resp_code) {
8094 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8095 case HCLGE_ETHERTYPE_ALREADY_ADD:
8096 return_status = 0;
8097 break;
8098 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8099 dev_err(&hdev->pdev->dev,
8100 "add mac ethertype failed for manager table overflow.\n");
8101 return_status = -EIO;
8102 break;
8103 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8104 dev_err(&hdev->pdev->dev,
8105 "add mac ethertype failed for key conflict.\n");
8106 return_status = -EIO;
8107 break;
8108 default:
8109 dev_err(&hdev->pdev->dev,
8110 "add mac ethertype failed for undefined, code=%u.\n",
8111 resp_code);
8112 return_status = -EIO;
8113 }
8114
8115 return return_status;
8116 }
8117
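/* Return true if mac_addr is already present in the hardware MAC-VLAN
 * table or is already assigned to another VF's vf_info.
 */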
8118 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8119 u8 *mac_addr)
8120 {
8121 struct hclge_mac_vlan_tbl_entry_cmd req;
8122 struct hclge_dev *hdev = vport->back;
8123 struct hclge_desc desc;
8124 u16 egress_port = 0;
8125 int i;
8126
8127 if (is_zero_ether_addr(mac_addr))
8128 return false;
8129
8130 memset(&req, 0, sizeof(req));
8131 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8132 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8133 req.egress_port = cpu_to_le16(egress_port);
8134 hclge_prepare_mac_addr(&req, mac_addr, false);
8135
8136 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8137 return true;
8138
8139 vf_idx += HCLGE_VF_VPORT_START_NUM;
8140 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8141 if (i != vf_idx &&
8142 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8143 return true;
8144
8145 return false;
8146 }
8147
8148 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8149 u8 *mac_addr)
8150 {
8151 struct hclge_vport *vport = hclge_get_vport(handle);
8152 struct hclge_dev *hdev = vport->back;
8153
8154 vport = hclge_get_vf_vport(hdev, vf);
8155 if (!vport)
8156 return -EINVAL;
8157
8158 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8159 dev_info(&hdev->pdev->dev,
8160 "Specified MAC(=%pM) is same as before, no change committed!\n",
8161 mac_addr);
8162 return 0;
8163 }
8164
8165 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8166 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8167 mac_addr);
8168 return -EEXIST;
8169 }
8170
8171 ether_addr_copy(vport->vf_info.mac, mac_addr);
8172
8173 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8174 dev_info(&hdev->pdev->dev,
8175 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8176 vf, mac_addr);
8177 return hclge_inform_reset_assert_to_vf(vport);
8178 }
8179
8180 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8181 vf, mac_addr);
8182 return 0;
8183 }
8184
8185 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8186 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8187 {
8188 struct hclge_desc desc;
8189 u8 resp_code;
8190 u16 retval;
8191 int ret;
8192
8193 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8194 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8195
8196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8197 if (ret) {
8198 dev_err(&hdev->pdev->dev,
8199 "add mac ethertype failed for cmd_send, ret =%d.\n",
8200 ret);
8201 return ret;
8202 }
8203
8204 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8205 retval = le16_to_cpu(desc.retval);
8206
8207 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8208 }
8209
8210 static int init_mgr_tbl(struct hclge_dev *hdev)
8211 {
8212 int ret;
8213 int i;
8214
8215 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8216 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8217 if (ret) {
8218 dev_err(&hdev->pdev->dev,
8219 "add mac ethertype failed, ret =%d.\n",
8220 ret);
8221 return ret;
8222 }
8223 }
8224
8225 return 0;
8226 }
8227
8228 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8229 {
8230 struct hclge_vport *vport = hclge_get_vport(handle);
8231 struct hclge_dev *hdev = vport->back;
8232
8233 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8234 }
8235
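/* Update the unicast mac list for a device address change: queue new_addr
 * for adding (kept at the list head) and mark old_addr for deletion.
 * Callers are expected to hold vport->mac_list_lock, as the set-mac path
 * below does.
 */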
8236 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8237 const u8 *old_addr, const u8 *new_addr)
8238 {
8239 struct list_head *list = &vport->uc_mac_list;
8240 struct hclge_mac_node *old_node, *new_node;
8241
8242 new_node = hclge_find_mac_node(list, new_addr);
8243 if (!new_node) {
8244 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8245 if (!new_node)
8246 return -ENOMEM;
8247
8248 new_node->state = HCLGE_MAC_TO_ADD;
8249 ether_addr_copy(new_node->mac_addr, new_addr);
8250 list_add(&new_node->node, list);
8251 } else {
8252 if (new_node->state == HCLGE_MAC_TO_DEL)
8253 new_node->state = HCLGE_MAC_ACTIVE;
8254
8255 /* make sure the new addr is at the list head, so the dev
8256 * addr is not skipped due to the umv space limitation when
8257 * the mac table is re-added after a global/IMP reset, which
8258 * clears the mac table in hardware.
8259 */
8260 list_move(&new_node->node, list);
8261 }
8262
8263 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8264 old_node = hclge_find_mac_node(list, old_addr);
8265 if (old_node) {
8266 if (old_node->state == HCLGE_MAC_TO_ADD) {
8267 list_del(&old_node->node);
8268 kfree(old_node);
8269 } else {
8270 old_node->state = HCLGE_MAC_TO_DEL;
8271 }
8272 }
8273 }
8274
8275 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8276
8277 return 0;
8278 }
8279
8280 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8281 bool is_first)
8282 {
8283 const unsigned char *new_addr = (const unsigned char *)p;
8284 struct hclge_vport *vport = hclge_get_vport(handle);
8285 struct hclge_dev *hdev = vport->back;
8286 unsigned char *old_addr = NULL;
8287 int ret;
8288
8289 /* mac addr check */
8290 if (is_zero_ether_addr(new_addr) ||
8291 is_broadcast_ether_addr(new_addr) ||
8292 is_multicast_ether_addr(new_addr)) {
8293 dev_err(&hdev->pdev->dev,
8294 "change uc mac err! invalid mac: %pM.\n",
8295 new_addr);
8296 return -EINVAL;
8297 }
8298
8299 ret = hclge_pause_addr_cfg(hdev, new_addr);
8300 if (ret) {
8301 dev_err(&hdev->pdev->dev,
8302 "failed to configure mac pause address, ret = %d\n",
8303 ret);
8304 return ret;
8305 }
8306
8307 if (!is_first)
8308 old_addr = hdev->hw.mac.mac_addr;
8309
8310 spin_lock_bh(&vport->mac_list_lock);
8311 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8312 if (ret) {
8313 dev_err(&hdev->pdev->dev,
8314 "failed to change the mac addr:%pM, ret = %d\n",
8315 new_addr, ret);
8316 spin_unlock_bh(&vport->mac_list_lock);
8317
8318 if (!is_first)
8319 hclge_pause_addr_cfg(hdev, old_addr);
8320
8321 return ret;
8322 }
8323 /* we must update the dev addr under the spin lock, to prevent the dev
8324 * addr from being removed by the set_rx_mode path.
8325 */
8326 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8327 spin_unlock_bh(&vport->mac_list_lock);
8328
8329 hclge_task_schedule(hdev, 0);
8330
8331 return 0;
8332 }
8333
8334 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8335 int cmd)
8336 {
8337 struct hclge_vport *vport = hclge_get_vport(handle);
8338 struct hclge_dev *hdev = vport->back;
8339
8340 if (!hdev->hw.mac.phydev)
8341 return -EOPNOTSUPP;
8342
8343 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8344 }
8345
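/* Read-modify-write the VLAN filter control config: read the current
 * setting for this vlan_type/vf_id, then set or clear the requested
 * fe_type bits depending on filter_en.
 */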
8346 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8347 u8 fe_type, bool filter_en, u8 vf_id)
8348 {
8349 struct hclge_vlan_filter_ctrl_cmd *req;
8350 struct hclge_desc desc;
8351 int ret;
8352
8353 /* read current vlan filter parameter */
8354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8355 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8356 req->vlan_type = vlan_type;
8357 req->vf_id = vf_id;
8358
8359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8360 if (ret) {
8361 dev_err(&hdev->pdev->dev,
8362 "failed to get vlan filter config, ret = %d.\n", ret);
8363 return ret;
8364 }
8365
8366 /* modify and write new config parameter */
8367 hclge_cmd_reuse_desc(&desc, false);
8368 req->vlan_fe = filter_en ?
8369 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8370
8371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8372 if (ret)
8373 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8374 ret);
8375
8376 return ret;
8377 }
8378
8379 #define HCLGE_FILTER_TYPE_VF 0
8380 #define HCLGE_FILTER_TYPE_PORT 1
8381 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8382 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8383 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8384 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8385 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8386 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8387 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8388 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8389 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8390
8391 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8392 {
8393 struct hclge_vport *vport = hclge_get_vport(handle);
8394 struct hclge_dev *hdev = vport->back;
8395
8396 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8397 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8398 HCLGE_FILTER_FE_EGRESS, enable, 0);
8399 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8400 HCLGE_FILTER_FE_INGRESS, enable, 0);
8401 } else {
8402 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8403 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8404 0);
8405 }
8406 if (enable)
8407 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8408 else
8409 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8410 }
8411
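/* Add or remove (is_kill) a vlan id in the VF VLAN filter of function
 * vfid. The VF bitmap is split across two descriptors (the first holds
 * HCLGE_MAX_VF_BYTES bytes), and the firmware response code is checked to
 * detect a full VF vlan table.
 */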
8412 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8413 bool is_kill, u16 vlan,
8414 __be16 proto)
8415 {
8416 struct hclge_vport *vport = &hdev->vport[vfid];
8417 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8418 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8419 struct hclge_desc desc[2];
8420 u8 vf_byte_val;
8421 u8 vf_byte_off;
8422 int ret;
8423
8424 /* if the vf vlan table is full, the firmware closes the vf vlan filter,
8425 * so it is neither possible nor necessary to add new vlan ids to it.
8426 * But if spoof check is enabled and the vf vlan table is full, new
8427 * vlans should not be added, because tx packets with these vlan ids would be dropped.
8428 */
8429 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8430 if (vport->vf_info.spoofchk && vlan) {
8431 dev_err(&hdev->pdev->dev,
8432 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8433 return -EPERM;
8434 }
8435 return 0;
8436 }
8437
8438 hclge_cmd_setup_basic_desc(&desc[0],
8439 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8440 hclge_cmd_setup_basic_desc(&desc[1],
8441 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8442
8443 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8444
8445 vf_byte_off = vfid / 8;
8446 vf_byte_val = 1 << (vfid % 8);
8447
8448 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8449 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8450
8451 req0->vlan_id = cpu_to_le16(vlan);
8452 req0->vlan_cfg = is_kill;
8453
8454 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8455 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8456 else
8457 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8458
8459 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8460 if (ret) {
8461 dev_err(&hdev->pdev->dev,
8462 "Send vf vlan command fail, ret =%d.\n",
8463 ret);
8464 return ret;
8465 }
8466
8467 if (!is_kill) {
8468 #define HCLGE_VF_VLAN_NO_ENTRY 2
8469 if (!req0->resp_code || req0->resp_code == 1)
8470 return 0;
8471
8472 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8473 set_bit(vfid, hdev->vf_vlan_full);
8474 dev_warn(&hdev->pdev->dev,
8475 "vf vlan table is full, vf vlan filter is disabled\n");
8476 return 0;
8477 }
8478
8479 dev_err(&hdev->pdev->dev,
8480 "Add vf vlan filter fail, ret =%u.\n",
8481 req0->resp_code);
8482 } else {
8483 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8484 if (!req0->resp_code)
8485 return 0;
8486
8487 /* the vf vlan filter is disabled when the vf vlan table is full,
8488 * so new vlan ids were never added to the vf vlan table.
8489 * Just return 0 without a warning, to avoid massive verbose
8490 * logs when unloading.
8491 */
8492 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8493 return 0;
8494
8495 dev_err(&hdev->pdev->dev,
8496 "Kill vf vlan filter fail, ret =%u.\n",
8497 req0->resp_code);
8498 }
8499
8500 return -EIO;
8501 }
8502
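/* Update the port (PF) VLAN filter for a single vlan id. The vlan id
 * space is addressed as chunks of HCLGE_VLAN_ID_OFFSET_STEP ids selected
 * by vlan_offset, with a byte/bit offset into the chunk bitmap.
 */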
8503 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8504 u16 vlan_id, bool is_kill)
8505 {
8506 struct hclge_vlan_filter_pf_cfg_cmd *req;
8507 struct hclge_desc desc;
8508 u8 vlan_offset_byte_val;
8509 u8 vlan_offset_byte;
8510 u8 vlan_offset_160;
8511 int ret;
8512
8513 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8514
8515 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8516 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8517 HCLGE_VLAN_BYTE_SIZE;
8518 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8519
8520 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8521 req->vlan_offset = vlan_offset_160;
8522 req->vlan_cfg = is_kill;
8523 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8524
8525 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8526 if (ret)
8527 dev_err(&hdev->pdev->dev,
8528 "port vlan command, send fail, ret =%d.\n", ret);
8529 return ret;
8530 }
8531
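/* Program a vlan filter change for one vport: always update the VF VLAN
 * filter, but only touch the port VLAN filter when this is the first
 * vport to join the vlan or the last one to leave it, tracked via
 * hdev->vlan_table.
 */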
8532 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8533 u16 vport_id, u16 vlan_id,
8534 bool is_kill)
8535 {
8536 u16 vport_idx, vport_num = 0;
8537 int ret;
8538
8539 if (is_kill && !vlan_id)
8540 return 0;
8541
8542 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8543 proto);
8544 if (ret) {
8545 dev_err(&hdev->pdev->dev,
8546 "Set %u vport vlan filter config fail, ret =%d.\n",
8547 vport_id, ret);
8548 return ret;
8549 }
8550
8551 /* vlan 0 may be added twice when 8021q module is enabled */
8552 if (!is_kill && !vlan_id &&
8553 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8554 return 0;
8555
8556 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8557 dev_err(&hdev->pdev->dev,
8558 "Add port vlan failed, vport %u is already in vlan %u\n",
8559 vport_id, vlan_id);
8560 return -EINVAL;
8561 }
8562
8563 if (is_kill &&
8564 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8565 dev_err(&hdev->pdev->dev,
8566 "Delete port vlan failed, vport %u is not in vlan %u\n",
8567 vport_id, vlan_id);
8568 return -EINVAL;
8569 }
8570
8571 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8572 vport_num++;
8573
8574 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8575 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8576 is_kill);
8577
8578 return ret;
8579 }
8580
8581 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8582 {
8583 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8584 struct hclge_vport_vtag_tx_cfg_cmd *req;
8585 struct hclge_dev *hdev = vport->back;
8586 struct hclge_desc desc;
8587 u16 bmap_index;
8588 int status;
8589
8590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8591
8592 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8593 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8594 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8595 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8596 vcfg->accept_tag1 ? 1 : 0);
8597 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8598 vcfg->accept_untag1 ? 1 : 0);
8599 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8600 vcfg->accept_tag2 ? 1 : 0);
8601 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8602 vcfg->accept_untag2 ? 1 : 0);
8603 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8604 vcfg->insert_tag1_en ? 1 : 0);
8605 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8606 vcfg->insert_tag2_en ? 1 : 0);
8607 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8608
8609 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8610 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8611 HCLGE_VF_NUM_PER_BYTE;
8612 req->vf_bitmap[bmap_index] =
8613 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8614
8615 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8616 if (status)
8617 dev_err(&hdev->pdev->dev,
8618 "Send port txvlan cfg command fail, ret =%d\n",
8619 status);
8620
8621 return status;
8622 }
8623
8624 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8625 {
8626 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8627 struct hclge_vport_vtag_rx_cfg_cmd *req;
8628 struct hclge_dev *hdev = vport->back;
8629 struct hclge_desc desc;
8630 u16 bmap_index;
8631 int status;
8632
8633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8634
8635 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8636 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8637 vcfg->strip_tag1_en ? 1 : 0);
8638 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8639 vcfg->strip_tag2_en ? 1 : 0);
8640 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8641 vcfg->vlan1_vlan_prionly ? 1 : 0);
8642 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8643 vcfg->vlan2_vlan_prionly ? 1 : 0);
8644
8645 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8646 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8647 HCLGE_VF_NUM_PER_BYTE;
8648 req->vf_bitmap[bmap_index] =
8649 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8650
8651 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8652 if (status)
8653 dev_err(&hdev->pdev->dev,
8654 "Send port rxvlan cfg command fail, ret =%d\n",
8655 status);
8656
8657 return status;
8658 }
8659
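/* Configure TX tag insertion and RX tag stripping for a vport according
 * to its port-base VLAN state: with a port-base VLAN the hardware inserts
 * vlan_tag as tag1 on TX, otherwise tag1 from the stack is accepted
 * unchanged.
 */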
8660 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8661 u16 port_base_vlan_state,
8662 u16 vlan_tag)
8663 {
8664 int ret;
8665
8666 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8667 vport->txvlan_cfg.accept_tag1 = true;
8668 vport->txvlan_cfg.insert_tag1_en = false;
8669 vport->txvlan_cfg.default_tag1 = 0;
8670 } else {
8671 vport->txvlan_cfg.accept_tag1 = false;
8672 vport->txvlan_cfg.insert_tag1_en = true;
8673 vport->txvlan_cfg.default_tag1 = vlan_tag;
8674 }
8675
8676 vport->txvlan_cfg.accept_untag1 = true;
8677
8678 /* accept_tag2 and accept_untag2 are not supported on
8679 * pdev revision 0x20; newer revisions support them, but
8680 * these two fields cannot be configured by the user.
8681 */
8682 vport->txvlan_cfg.accept_tag2 = true;
8683 vport->txvlan_cfg.accept_untag2 = true;
8684 vport->txvlan_cfg.insert_tag2_en = false;
8685 vport->txvlan_cfg.default_tag2 = 0;
8686
8687 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8688 vport->rxvlan_cfg.strip_tag1_en = false;
8689 vport->rxvlan_cfg.strip_tag2_en =
8690 vport->rxvlan_cfg.rx_vlan_offload_en;
8691 } else {
8692 vport->rxvlan_cfg.strip_tag1_en =
8693 vport->rxvlan_cfg.rx_vlan_offload_en;
8694 vport->rxvlan_cfg.strip_tag2_en = true;
8695 }
8696 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8697 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8698
8699 ret = hclge_set_vlan_tx_offload_cfg(vport);
8700 if (ret)
8701 return ret;
8702
8703 return hclge_set_vlan_rx_offload_cfg(vport);
8704 }
8705
8706 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8707 {
8708 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8709 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8710 struct hclge_desc desc;
8711 int status;
8712
8713 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8714 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8715 rx_req->ot_fst_vlan_type =
8716 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8717 rx_req->ot_sec_vlan_type =
8718 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8719 rx_req->in_fst_vlan_type =
8720 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8721 rx_req->in_sec_vlan_type =
8722 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8723
8724 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8725 if (status) {
8726 dev_err(&hdev->pdev->dev,
8727 "Send rxvlan protocol type command fail, ret =%d\n",
8728 status);
8729 return status;
8730 }
8731
8732 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8733
8734 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8735 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8736 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8737
8738 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8739 if (status)
8740 dev_err(&hdev->pdev->dev,
8741 "Send txvlan protocol type command fail, ret =%d\n",
8742 status);
8743
8744 return status;
8745 }
8746
8747 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8748 {
8749 #define HCLGE_DEF_VLAN_TYPE 0x8100
8750
8751 struct hnae3_handle *handle = &hdev->vport[0].nic;
8752 struct hclge_vport *vport;
8753 int ret;
8754 int i;
8755
8756 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8757 /* for revision 0x21, vf vlan filter is per function */
8758 for (i = 0; i < hdev->num_alloc_vport; i++) {
8759 vport = &hdev->vport[i];
8760 ret = hclge_set_vlan_filter_ctrl(hdev,
8761 HCLGE_FILTER_TYPE_VF,
8762 HCLGE_FILTER_FE_EGRESS,
8763 true,
8764 vport->vport_id);
8765 if (ret)
8766 return ret;
8767 }
8768
8769 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8770 HCLGE_FILTER_FE_INGRESS, true,
8771 0);
8772 if (ret)
8773 return ret;
8774 } else {
8775 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8776 HCLGE_FILTER_FE_EGRESS_V1_B,
8777 true, 0);
8778 if (ret)
8779 return ret;
8780 }
8781
8782 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8783
8784 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8785 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8786 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8787 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8788 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8789 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8790
8791 ret = hclge_set_vlan_protocol_type(hdev);
8792 if (ret)
8793 return ret;
8794
8795 for (i = 0; i < hdev->num_alloc_vport; i++) {
8796 u16 vlan_tag;
8797
8798 vport = &hdev->vport[i];
8799 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8800
8801 ret = hclge_vlan_offload_cfg(vport,
8802 vport->port_base_vlan_cfg.state,
8803 vlan_tag);
8804 if (ret)
8805 return ret;
8806 }
8807
8808 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8809 }
8810
8811 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8812 bool writen_to_tbl)
8813 {
8814 struct hclge_vport_vlan_cfg *vlan, *tmp;
8815
8816 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8817 if (vlan->vlan_id == vlan_id)
8818 return;
8819
8820 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8821 if (!vlan)
8822 return;
8823
8824 vlan->hd_tbl_status = writen_to_tbl;
8825 vlan->vlan_id = vlan_id;
8826
8827 list_add_tail(&vlan->node, &vport->vlan_list);
8828 }
8829
8830 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8831 {
8832 struct hclge_vport_vlan_cfg *vlan, *tmp;
8833 struct hclge_dev *hdev = vport->back;
8834 int ret;
8835
8836 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8837 if (!vlan->hd_tbl_status) {
8838 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8839 vport->vport_id,
8840 vlan->vlan_id, false);
8841 if (ret) {
8842 dev_err(&hdev->pdev->dev,
8843 "restore vport vlan list failed, ret=%d\n",
8844 ret);
8845 return ret;
8846 }
8847 }
8848 vlan->hd_tbl_status = true;
8849 }
8850
8851 return 0;
8852 }
8853
8854 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8855 bool is_write_tbl)
8856 {
8857 struct hclge_vport_vlan_cfg *vlan, *tmp;
8858 struct hclge_dev *hdev = vport->back;
8859
8860 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8861 if (vlan->vlan_id == vlan_id) {
8862 if (is_write_tbl && vlan->hd_tbl_status)
8863 hclge_set_vlan_filter_hw(hdev,
8864 htons(ETH_P_8021Q),
8865 vport->vport_id,
8866 vlan_id,
8867 true);
8868
8869 list_del(&vlan->node);
8870 kfree(vlan);
8871 break;
8872 }
8873 }
8874 }
8875
8876 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8877 {
8878 struct hclge_vport_vlan_cfg *vlan, *tmp;
8879 struct hclge_dev *hdev = vport->back;
8880
8881 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8882 if (vlan->hd_tbl_status)
8883 hclge_set_vlan_filter_hw(hdev,
8884 htons(ETH_P_8021Q),
8885 vport->vport_id,
8886 vlan->vlan_id,
8887 true);
8888
8889 vlan->hd_tbl_status = false;
8890 if (is_del_list) {
8891 list_del(&vlan->node);
8892 kfree(vlan);
8893 }
8894 }
8895 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8896 }
8897
8898 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8899 {
8900 struct hclge_vport_vlan_cfg *vlan, *tmp;
8901 struct hclge_vport *vport;
8902 int i;
8903
8904 for (i = 0; i < hdev->num_alloc_vport; i++) {
8905 vport = &hdev->vport[i];
8906 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8907 list_del(&vlan->node);
8908 kfree(vlan);
8909 }
8910 }
8911 }
8912
8913 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8914 {
8915 struct hclge_vport_vlan_cfg *vlan, *tmp;
8916 struct hclge_dev *hdev = vport->back;
8917 u16 vlan_proto;
8918 u16 vlan_id;
8919 u16 state;
8920 int ret;
8921
8922 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8923 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8924 state = vport->port_base_vlan_cfg.state;
8925
8926 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8927 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8928 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8929 vport->vport_id, vlan_id,
8930 false);
8931 return;
8932 }
8933
8934 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8935 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8936 vport->vport_id,
8937 vlan->vlan_id, false);
8938 if (ret)
8939 break;
8940 vlan->hd_tbl_status = true;
8941 }
8942 }
8943
8944 /* For global reset and imp reset, hardware will clear the mac table,
8945 * so we change the mac address state from ACTIVE to TO_ADD, then they
8946 * can be restored in the service task after the reset completes.
8947 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
8948 * to be restored after reset, so just remove these mac nodes from mac_list.
8949 */
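/* Transitions applied by this helper (list-only, no hardware access):
 *   HCLGE_MAC_ACTIVE -> HCLGE_MAC_TO_ADD   (re-programmed by service task)
 *   HCLGE_MAC_TO_DEL -> node freed          (no longer needed after reset)
 * Nodes in other states are left as they are.
 */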
8950 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8951 {
8952 struct hclge_mac_node *mac_node, *tmp;
8953
8954 list_for_each_entry_safe(mac_node, tmp, list, node) {
8955 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8956 mac_node->state = HCLGE_MAC_TO_ADD;
8957 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8958 list_del(&mac_node->node);
8959 kfree(mac_node);
8960 }
8961 }
8962 }
8963
8964 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8965 {
8966 spin_lock_bh(&vport->mac_list_lock);
8967
8968 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8969 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8970 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8971
8972 spin_unlock_bh(&vport->mac_list_lock);
8973 }
8974
8975 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8976 {
8977 struct hclge_vport *vport = &hdev->vport[0];
8978 struct hnae3_handle *handle = &vport->nic;
8979
8980 hclge_restore_mac_table_common(vport);
8981 hclge_restore_vport_vlan_table(vport);
8982 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8983
8984 hclge_restore_fd_entries(handle);
8985 }
8986
8987 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8988 {
8989 struct hclge_vport *vport = hclge_get_vport(handle);
8990
8991 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8992 vport->rxvlan_cfg.strip_tag1_en = false;
8993 vport->rxvlan_cfg.strip_tag2_en = enable;
8994 } else {
8995 vport->rxvlan_cfg.strip_tag1_en = enable;
8996 vport->rxvlan_cfg.strip_tag2_en = true;
8997 }
8998 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8999 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9000 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9001
9002 return hclge_set_vlan_rx_offload_cfg(vport);
9003 }
9004
9005 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9006 u16 port_base_vlan_state,
9007 struct hclge_vlan_info *new_info,
9008 struct hclge_vlan_info *old_info)
9009 {
9010 struct hclge_dev *hdev = vport->back;
9011 int ret;
9012
9013 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9014 hclge_rm_vport_all_vlan_table(vport, false);
9015 return hclge_set_vlan_filter_hw(hdev,
9016 htons(new_info->vlan_proto),
9017 vport->vport_id,
9018 new_info->vlan_tag,
9019 false);
9020 }
9021
9022 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9023 vport->vport_id, old_info->vlan_tag,
9024 true);
9025 if (ret)
9026 return ret;
9027
9028 return hclge_add_vport_all_vlan_table(vport);
9029 }
9030
9031 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9032 struct hclge_vlan_info *vlan_info)
9033 {
9034 struct hnae3_handle *nic = &vport->nic;
9035 struct hclge_vlan_info *old_vlan_info;
9036 struct hclge_dev *hdev = vport->back;
9037 int ret;
9038
9039 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9040
9041 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9042 if (ret)
9043 return ret;
9044
9045 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9046 /* add new VLAN tag */
9047 ret = hclge_set_vlan_filter_hw(hdev,
9048 htons(vlan_info->vlan_proto),
9049 vport->vport_id,
9050 vlan_info->vlan_tag,
9051 false);
9052 if (ret)
9053 return ret;
9054
9055 /* remove old VLAN tag */
9056 ret = hclge_set_vlan_filter_hw(hdev,
9057 htons(old_vlan_info->vlan_proto),
9058 vport->vport_id,
9059 old_vlan_info->vlan_tag,
9060 true);
9061 if (ret)
9062 return ret;
9063
9064 goto update;
9065 }
9066
9067 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9068 old_vlan_info);
9069 if (ret)
9070 return ret;
9071
9072 /* update state only when disable/enable port based VLAN */
9073 vport->port_base_vlan_cfg.state = state;
9074 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9075 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9076 else
9077 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9078
9079 update:
9080 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9081 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9082 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9083
9084 return 0;
9085 }
9086
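/* Decision table implemented below (current state vs. requested vlan):
 *   DISABLE + vlan == 0            -> NOCHANGE
 *   DISABLE + vlan != 0            -> ENABLE
 *   ENABLE/MODIFY + vlan == 0      -> DISABLE
 *   ENABLE/MODIFY + same vlan tag  -> NOCHANGE
 *   ENABLE/MODIFY + new vlan tag   -> MODIFY
 */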
9087 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9088 enum hnae3_port_base_vlan_state state,
9089 u16 vlan)
9090 {
9091 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9092 if (!vlan)
9093 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9094 else
9095 return HNAE3_PORT_BASE_VLAN_ENABLE;
9096 } else {
9097 if (!vlan)
9098 return HNAE3_PORT_BASE_VLAN_DISABLE;
9099 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9100 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9101 else
9102 return HNAE3_PORT_BASE_VLAN_MODIFY;
9103 }
9104 }
9105
9106 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9107 u16 vlan, u8 qos, __be16 proto)
9108 {
9109 struct hclge_vport *vport = hclge_get_vport(handle);
9110 struct hclge_dev *hdev = vport->back;
9111 struct hclge_vlan_info vlan_info;
9112 u16 state;
9113 int ret;
9114
9115 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9116 return -EOPNOTSUPP;
9117
9118 vport = hclge_get_vf_vport(hdev, vfid);
9119 if (!vport)
9120 return -EINVAL;
9121
9122 /* qos is a 3-bit value, so it cannot be bigger than 7 */
9123 if (vlan > VLAN_N_VID - 1 || qos > 7)
9124 return -EINVAL;
9125 if (proto != htons(ETH_P_8021Q))
9126 return -EPROTONOSUPPORT;
9127
9128 state = hclge_get_port_base_vlan_state(vport,
9129 vport->port_base_vlan_cfg.state,
9130 vlan);
9131 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9132 return 0;
9133
9134 vlan_info.vlan_tag = vlan;
9135 vlan_info.qos = qos;
9136 vlan_info.vlan_proto = ntohs(proto);
9137
9138 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9139 return hclge_update_port_base_vlan_cfg(vport, state,
9140 &vlan_info);
9141 } else {
9142 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9143 vport->vport_id, state,
9144 vlan, qos,
9145 ntohs(proto));
9146 return ret;
9147 }
9148 }
9149
9150 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9151 {
9152 struct hclge_vlan_info *vlan_info;
9153 struct hclge_vport *vport;
9154 int ret;
9155 int vf;
9156
9157 /* clear port base vlan for all vf */
9158 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9159 vport = &hdev->vport[vf];
9160 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9161
9162 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9163 vport->vport_id,
9164 vlan_info->vlan_tag, true);
9165 if (ret)
9166 dev_err(&hdev->pdev->dev,
9167 "failed to clear vf vlan for vf%d, ret = %d\n",
9168 vf - HCLGE_VF_VPORT_START_NUM, ret);
9169 }
9170 }
9171
9172 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9173 u16 vlan_id, bool is_kill)
9174 {
9175 struct hclge_vport *vport = hclge_get_vport(handle);
9176 struct hclge_dev *hdev = vport->back;
9177 bool writen_to_tbl = false;
9178 int ret = 0;
9179
9180 /* When the device is resetting or the reset failed, the firmware is
9181 * unable to handle the mailbox. Just record the vlan id, and remove it
9182 * after the reset has finished.
9183 */
9184 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9185 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9186 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9187 return -EBUSY;
9188 }
9189
9190 /* When port based vlan is enabled, we use the port based vlan as the
9191 * vlan filter entry. In this case, we don't update the vlan filter
9192 * table when the user adds a new vlan or removes an existing one; we
9193 * just update the vport vlan list. The vlan ids in the vlan list are
9194 * not written to the vlan filter table until port based vlan is disabled.
9195 */
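/* For example, if port based vlan is active and the user adds vlan 100,
 * the id is only appended to vport->vlan_list with hd_tbl_status = false;
 * it is written to the hardware filter table later, once port based vlan
 * is disabled (see hclge_add_vport_all_vlan_table()).
 */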
9196 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9197 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9198 vlan_id, is_kill);
9199 writen_to_tbl = true;
9200 }
9201
9202 if (!ret) {
9203 if (!is_kill)
9204 hclge_add_vport_vlan_table(vport, vlan_id,
9205 writen_to_tbl);
9206 else if (is_kill && vlan_id != 0)
9207 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9208 } else if (is_kill) {
9209 /* when removing the hw vlan filter failed, record the vlan id,
9210 * and try to remove it from hw later, to stay consistent
9211 * with the stack
9212 */
9213 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9214 }
9215 return ret;
9216 }
9217
9218 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9219 {
9220 #define HCLGE_MAX_SYNC_COUNT 60
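/* Bounds the work done per service-task pass: at most HCLGE_MAX_SYNC_COUNT
 * stale entries are removed per invocation; any remainder is handled on the
 * next pass.
 */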
9221
9222 int i, ret, sync_cnt = 0;
9223 u16 vlan_id;
9224
9225 /* start from vport 1 for PF is always alive */
9226 for (i = 0; i < hdev->num_alloc_vport; i++) {
9227 struct hclge_vport *vport = &hdev->vport[i];
9228
9229 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9230 VLAN_N_VID);
9231 while (vlan_id != VLAN_N_VID) {
9232 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9233 vport->vport_id, vlan_id,
9234 true);
9235 if (ret && ret != -EINVAL)
9236 return;
9237
9238 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9239 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9240
9241 sync_cnt++;
9242 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9243 return;
9244
9245 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9246 VLAN_N_VID);
9247 }
9248 }
9249 }
9250
9251 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9252 {
9253 struct hclge_config_max_frm_size_cmd *req;
9254 struct hclge_desc desc;
9255
9256 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9257
9258 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9259 req->max_frm_size = cpu_to_le16(new_mps);
9260 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9261
9262 return hclge_cmd_send(&hdev->hw, &desc, 1);
9263 }
9264
9265 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9266 {
9267 struct hclge_vport *vport = hclge_get_vport(handle);
9268
9269 return hclge_set_vport_mtu(vport, new_mtu);
9270 }
9271
9272 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9273 {
9274 struct hclge_dev *hdev = vport->back;
9275 int i, max_frm_size, ret;
9276
9277 /* HW supports 2 layers of vlan tags */
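/* e.g. new_mtu = 1500 gives max_frm_size = 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526 bytes.
 */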
9278 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9279 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9280 max_frm_size > HCLGE_MAC_MAX_FRAME)
9281 return -EINVAL;
9282
9283 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9284 mutex_lock(&hdev->vport_lock);
9285 /* VF's mps must fit within hdev->mps */
9286 if (vport->vport_id && max_frm_size > hdev->mps) {
9287 mutex_unlock(&hdev->vport_lock);
9288 return -EINVAL;
9289 } else if (vport->vport_id) {
9290 vport->mps = max_frm_size;
9291 mutex_unlock(&hdev->vport_lock);
9292 return 0;
9293 }
9294
9295 /* PF's mps must be greater than or equal to each VF's mps */
9296 for (i = 1; i < hdev->num_alloc_vport; i++)
9297 if (max_frm_size < hdev->vport[i].mps) {
9298 mutex_unlock(&hdev->vport_lock);
9299 return -EINVAL;
9300 }
9301
9302 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9303
9304 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9305 if (ret) {
9306 dev_err(&hdev->pdev->dev,
9307 "Change mtu fail, ret =%d\n", ret);
9308 goto out;
9309 }
9310
9311 hdev->mps = max_frm_size;
9312 vport->mps = max_frm_size;
9313
9314 ret = hclge_buffer_alloc(hdev);
9315 if (ret)
9316 dev_err(&hdev->pdev->dev,
9317 "Allocate buffer fail, ret =%d\n", ret);
9318
9319 out:
9320 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9321 mutex_unlock(&hdev->vport_lock);
9322 return ret;
9323 }
9324
9325 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9326 bool enable)
9327 {
9328 struct hclge_reset_tqp_queue_cmd *req;
9329 struct hclge_desc desc;
9330 int ret;
9331
9332 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9333
9334 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9335 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9336 if (enable)
9337 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9338
9339 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9340 if (ret) {
9341 dev_err(&hdev->pdev->dev,
9342 "Send tqp reset cmd error, status =%d\n", ret);
9343 return ret;
9344 }
9345
9346 return 0;
9347 }
9348
9349 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9350 {
9351 struct hclge_reset_tqp_queue_cmd *req;
9352 struct hclge_desc desc;
9353 int ret;
9354
9355 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9356
9357 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9358 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9359
9360 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9361 if (ret) {
9362 dev_err(&hdev->pdev->dev,
9363 "Get reset status error, status =%d\n", ret);
9364 return ret;
9365 }
9366
9367 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9368 }
9369
9370 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9371 {
9372 struct hnae3_queue *queue;
9373 struct hclge_tqp *tqp;
9374
9375 queue = handle->kinfo.tqp[queue_id];
9376 tqp = container_of(queue, struct hclge_tqp, q);
9377
9378 return tqp->index;
9379 }
9380
9381 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9382 {
9383 struct hclge_vport *vport = hclge_get_vport(handle);
9384 struct hclge_dev *hdev = vport->back;
9385 int reset_try_times = 0;
9386 int reset_status;
9387 u16 queue_gid;
9388 int ret;
9389
9390 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9391
9392 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9393 if (ret) {
9394 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9395 return ret;
9396 }
9397
9398 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9399 if (ret) {
9400 dev_err(&hdev->pdev->dev,
9401 "Send reset tqp cmd fail, ret = %d\n", ret);
9402 return ret;
9403 }
9404
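/* Poll for completion: each iteration sleeps roughly 1 ms, so the total
 * wait is bounded by about HCLGE_TQP_RESET_TRY_TIMES milliseconds (the
 * constant is defined elsewhere in this driver).
 */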
9405 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9406 reset_status = hclge_get_reset_status(hdev, queue_gid);
9407 if (reset_status)
9408 break;
9409
9410 /* Wait for tqp hw reset */
9411 usleep_range(1000, 1200);
9412 }
9413
9414 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9415 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9416 return ret;
9417 }
9418
9419 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9420 if (ret)
9421 dev_err(&hdev->pdev->dev,
9422 "Deassert the soft reset fail, ret = %d\n", ret);
9423
9424 return ret;
9425 }
9426
9427 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9428 {
9429 struct hnae3_handle *handle = &vport->nic;
9430 struct hclge_dev *hdev = vport->back;
9431 int reset_try_times = 0;
9432 int reset_status;
9433 u16 queue_gid;
9434 int ret;
9435
9436 if (queue_id >= handle->kinfo.num_tqps) {
9437 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9438 queue_id);
9439 return;
9440 }
9441
9442 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9443
9444 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9445 if (ret) {
9446 dev_warn(&hdev->pdev->dev,
9447 "Send reset tqp cmd fail, ret = %d\n", ret);
9448 return;
9449 }
9450
9451 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9452 reset_status = hclge_get_reset_status(hdev, queue_gid);
9453 if (reset_status)
9454 break;
9455
9456 /* Wait for tqp hw reset */
9457 usleep_range(1000, 1200);
9458 }
9459
9460 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9461 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9462 return;
9463 }
9464
9465 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9466 if (ret)
9467 dev_warn(&hdev->pdev->dev,
9468 "Deassert the soft reset fail, ret = %d\n", ret);
9469 }
9470
9471 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9472 {
9473 struct hclge_vport *vport = hclge_get_vport(handle);
9474 struct hclge_dev *hdev = vport->back;
9475
9476 return hdev->fw_version;
9477 }
9478
9479 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9480 {
9481 struct phy_device *phydev = hdev->hw.mac.phydev;
9482
9483 if (!phydev)
9484 return;
9485
9486 phy_set_asym_pause(phydev, rx_en, tx_en);
9487 }
9488
9489 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9490 {
9491 int ret;
9492
9493 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9494 return 0;
9495
9496 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9497 if (ret)
9498 dev_err(&hdev->pdev->dev,
9499 "configure pauseparam error, ret = %d.\n", ret);
9500
9501 return ret;
9502 }
9503
9504 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9505 {
9506 struct phy_device *phydev = hdev->hw.mac.phydev;
9507 u16 remote_advertising = 0;
9508 u16 local_advertising;
9509 u32 rx_pause, tx_pause;
9510 u8 flowctl;
9511
9512 if (!phydev->link || !phydev->autoneg)
9513 return 0;
9514
9515 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9516
9517 if (phydev->pause)
9518 remote_advertising = LPA_PAUSE_CAP;
9519
9520 if (phydev->asym_pause)
9521 remote_advertising |= LPA_PAUSE_ASYM;
9522
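/* Resolve pause per IEEE 802.3 Annex 28B; for example, if both ends
 * advertise symmetric pause the result is FLOW_CTRL_TX | FLOW_CTRL_RX,
 * while asymmetric combinations resolve to TX-only or RX-only pause.
 */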
9523 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9524 remote_advertising);
9525 tx_pause = flowctl & FLOW_CTRL_TX;
9526 rx_pause = flowctl & FLOW_CTRL_RX;
9527
9528 if (phydev->duplex == HCLGE_MAC_HALF) {
9529 tx_pause = 0;
9530 rx_pause = 0;
9531 }
9532
9533 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9534 }
9535
9536 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9537 u32 *rx_en, u32 *tx_en)
9538 {
9539 struct hclge_vport *vport = hclge_get_vport(handle);
9540 struct hclge_dev *hdev = vport->back;
9541 struct phy_device *phydev = hdev->hw.mac.phydev;
9542
9543 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9544
9545 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9546 *rx_en = 0;
9547 *tx_en = 0;
9548 return;
9549 }
9550
9551 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9552 *rx_en = 1;
9553 *tx_en = 0;
9554 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9555 *tx_en = 1;
9556 *rx_en = 0;
9557 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9558 *rx_en = 1;
9559 *tx_en = 1;
9560 } else {
9561 *rx_en = 0;
9562 *tx_en = 0;
9563 }
9564 }
9565
9566 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9567 u32 rx_en, u32 tx_en)
9568 {
9569 if (rx_en && tx_en)
9570 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9571 else if (rx_en && !tx_en)
9572 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9573 else if (!rx_en && tx_en)
9574 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9575 else
9576 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9577
9578 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9579 }
9580
9581 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9582 u32 rx_en, u32 tx_en)
9583 {
9584 struct hclge_vport *vport = hclge_get_vport(handle);
9585 struct hclge_dev *hdev = vport->back;
9586 struct phy_device *phydev = hdev->hw.mac.phydev;
9587 u32 fc_autoneg;
9588
9589 if (phydev) {
9590 fc_autoneg = hclge_get_autoneg(handle);
9591 if (auto_neg != fc_autoneg) {
9592 dev_info(&hdev->pdev->dev,
9593 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9594 return -EOPNOTSUPP;
9595 }
9596 }
9597
9598 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9599 dev_info(&hdev->pdev->dev,
9600 "Priority flow control enabled. Cannot set link flow control.\n");
9601 return -EOPNOTSUPP;
9602 }
9603
9604 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9605
9606 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9607
9608 if (!auto_neg)
9609 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9610
9611 if (phydev)
9612 return phy_start_aneg(phydev);
9613
9614 return -EOPNOTSUPP;
9615 }
9616
9617 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9618 u8 *auto_neg, u32 *speed, u8 *duplex)
9619 {
9620 struct hclge_vport *vport = hclge_get_vport(handle);
9621 struct hclge_dev *hdev = vport->back;
9622
9623 if (speed)
9624 *speed = hdev->hw.mac.speed;
9625 if (duplex)
9626 *duplex = hdev->hw.mac.duplex;
9627 if (auto_neg)
9628 *auto_neg = hdev->hw.mac.autoneg;
9629 }
9630
9631 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9632 u8 *module_type)
9633 {
9634 struct hclge_vport *vport = hclge_get_vport(handle);
9635 struct hclge_dev *hdev = vport->back;
9636
9637 /* When the nic is down, the service task is not running and does not
9638 * update the port information every second. Query the port information
9639 * before returning the media type to ensure it is up to date.
9640 */
9641 hclge_update_port_info(hdev);
9642
9643 if (media_type)
9644 *media_type = hdev->hw.mac.media_type;
9645
9646 if (module_type)
9647 *module_type = hdev->hw.mac.module_type;
9648 }
9649
9650 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9651 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9652 {
9653 struct hclge_vport *vport = hclge_get_vport(handle);
9654 struct hclge_dev *hdev = vport->back;
9655 struct phy_device *phydev = hdev->hw.mac.phydev;
9656 int mdix_ctrl, mdix, is_resolved;
9657 unsigned int retval;
9658
9659 if (!phydev) {
9660 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9661 *tp_mdix = ETH_TP_MDI_INVALID;
9662 return;
9663 }
9664
9665 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9666
9667 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9668 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9669 HCLGE_PHY_MDIX_CTRL_S);
9670
9671 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9672 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9673 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9674
9675 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9676
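/* mdix_ctrl encoding as interpreted below (PHY specific):
 * 0x0 = force MDI, 0x1 = force MDI-X, 0x3 = automatic crossover.
 */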
9677 switch (mdix_ctrl) {
9678 case 0x0:
9679 *tp_mdix_ctrl = ETH_TP_MDI;
9680 break;
9681 case 0x1:
9682 *tp_mdix_ctrl = ETH_TP_MDI_X;
9683 break;
9684 case 0x3:
9685 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9686 break;
9687 default:
9688 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9689 break;
9690 }
9691
9692 if (!is_resolved)
9693 *tp_mdix = ETH_TP_MDI_INVALID;
9694 else if (mdix)
9695 *tp_mdix = ETH_TP_MDI_X;
9696 else
9697 *tp_mdix = ETH_TP_MDI;
9698 }
9699
9700 static void hclge_info_show(struct hclge_dev *hdev)
9701 {
9702 struct device *dev = &hdev->pdev->dev;
9703
9704 dev_info(dev, "PF info begin:\n");
9705
9706 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9707 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9708 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9709 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9710 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9711 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9712 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9713 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9714 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9715 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9716 dev_info(dev, "This is %s PF\n",
9717 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9718 dev_info(dev, "DCB %s\n",
9719 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9720 dev_info(dev, "MQPRIO %s\n",
9721 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9722
9723 dev_info(dev, "PF info end.\n");
9724 }
9725
9726 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9727 struct hclge_vport *vport)
9728 {
9729 struct hnae3_client *client = vport->nic.client;
9730 struct hclge_dev *hdev = ae_dev->priv;
9731 int rst_cnt = hdev->rst_stats.reset_cnt;
9732 int ret;
9733
9734 ret = client->ops->init_instance(&vport->nic);
9735 if (ret)
9736 return ret;
9737
9738 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9739 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9740 rst_cnt != hdev->rst_stats.reset_cnt) {
9741 ret = -EBUSY;
9742 goto init_nic_err;
9743 }
9744
9745 /* Enable nic hw error interrupts */
9746 ret = hclge_config_nic_hw_error(hdev, true);
9747 if (ret) {
9748 dev_err(&ae_dev->pdev->dev,
9749 "fail(%d) to enable hw error interrupts\n", ret);
9750 goto init_nic_err;
9751 }
9752
9753 hnae3_set_client_init_flag(client, ae_dev, 1);
9754
9755 if (netif_msg_drv(&hdev->vport->nic))
9756 hclge_info_show(hdev);
9757
9758 return ret;
9759
9760 init_nic_err:
9761 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9762 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9763 msleep(HCLGE_WAIT_RESET_DONE);
9764
9765 client->ops->uninit_instance(&vport->nic, 0);
9766
9767 return ret;
9768 }
9769
9770 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9771 struct hclge_vport *vport)
9772 {
9773 struct hclge_dev *hdev = ae_dev->priv;
9774 struct hnae3_client *client;
9775 int rst_cnt;
9776 int ret;
9777
9778 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9779 !hdev->nic_client)
9780 return 0;
9781
9782 client = hdev->roce_client;
9783 ret = hclge_init_roce_base_info(vport);
9784 if (ret)
9785 return ret;
9786
9787 rst_cnt = hdev->rst_stats.reset_cnt;
9788 ret = client->ops->init_instance(&vport->roce);
9789 if (ret)
9790 return ret;
9791
9792 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9793 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9794 rst_cnt != hdev->rst_stats.reset_cnt) {
9795 ret = -EBUSY;
9796 goto init_roce_err;
9797 }
9798
9799 /* Enable roce ras interrupts */
9800 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9801 if (ret) {
9802 dev_err(&ae_dev->pdev->dev,
9803 "fail(%d) to enable roce ras interrupts\n", ret);
9804 goto init_roce_err;
9805 }
9806
9807 hnae3_set_client_init_flag(client, ae_dev, 1);
9808
9809 return 0;
9810
9811 init_roce_err:
9812 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9813 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9814 msleep(HCLGE_WAIT_RESET_DONE);
9815
9816 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9817
9818 return ret;
9819 }
9820
9821 static int hclge_init_client_instance(struct hnae3_client *client,
9822 struct hnae3_ae_dev *ae_dev)
9823 {
9824 struct hclge_dev *hdev = ae_dev->priv;
9825 struct hclge_vport *vport;
9826 int i, ret;
9827
9828 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9829 vport = &hdev->vport[i];
9830
9831 switch (client->type) {
9832 case HNAE3_CLIENT_KNIC:
9833 hdev->nic_client = client;
9834 vport->nic.client = client;
9835 ret = hclge_init_nic_client_instance(ae_dev, vport);
9836 if (ret)
9837 goto clear_nic;
9838
9839 ret = hclge_init_roce_client_instance(ae_dev, vport);
9840 if (ret)
9841 goto clear_roce;
9842
9843 break;
9844 case HNAE3_CLIENT_ROCE:
9845 if (hnae3_dev_roce_supported(hdev)) {
9846 hdev->roce_client = client;
9847 vport->roce.client = client;
9848 }
9849
9850 ret = hclge_init_roce_client_instance(ae_dev, vport);
9851 if (ret)
9852 goto clear_roce;
9853
9854 break;
9855 default:
9856 return -EINVAL;
9857 }
9858 }
9859
9860 return 0;
9861
9862 clear_nic:
9863 hdev->nic_client = NULL;
9864 vport->nic.client = NULL;
9865 return ret;
9866 clear_roce:
9867 hdev->roce_client = NULL;
9868 vport->roce.client = NULL;
9869 return ret;
9870 }
9871
9872 static void hclge_uninit_client_instance(struct hnae3_client *client,
9873 struct hnae3_ae_dev *ae_dev)
9874 {
9875 struct hclge_dev *hdev = ae_dev->priv;
9876 struct hclge_vport *vport;
9877 int i;
9878
9879 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9880 vport = &hdev->vport[i];
9881 if (hdev->roce_client) {
9882 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9883 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9884 msleep(HCLGE_WAIT_RESET_DONE);
9885
9886 hdev->roce_client->ops->uninit_instance(&vport->roce,
9887 0);
9888 hdev->roce_client = NULL;
9889 vport->roce.client = NULL;
9890 }
9891 if (client->type == HNAE3_CLIENT_ROCE)
9892 return;
9893 if (hdev->nic_client && client->ops->uninit_instance) {
9894 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9895 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9896 msleep(HCLGE_WAIT_RESET_DONE);
9897
9898 client->ops->uninit_instance(&vport->nic, 0);
9899 hdev->nic_client = NULL;
9900 vport->nic.client = NULL;
9901 }
9902 }
9903 }
9904
9905 static int hclge_pci_init(struct hclge_dev *hdev)
9906 {
9907 struct pci_dev *pdev = hdev->pdev;
9908 struct hclge_hw *hw;
9909 int ret;
9910
9911 ret = pci_enable_device(pdev);
9912 if (ret) {
9913 dev_err(&pdev->dev, "failed to enable PCI device\n");
9914 return ret;
9915 }
9916
9917 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9918 if (ret) {
9919 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9920 if (ret) {
9921 dev_err(&pdev->dev,
9922 "can't set consistent PCI DMA");
9923 goto err_disable_device;
9924 }
9925 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9926 }
9927
9928 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9929 if (ret) {
9930 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9931 goto err_disable_device;
9932 }
9933
9934 pci_set_master(pdev);
9935 hw = &hdev->hw;
9936 hw->io_base = pcim_iomap(pdev, 2, 0);
9937 if (!hw->io_base) {
9938 dev_err(&pdev->dev, "Can't map configuration register space\n");
9939 ret = -ENOMEM;
9940 goto err_clr_master;
9941 }
9942
9943 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9944
9945 return 0;
9946 err_clr_master:
9947 pci_clear_master(pdev);
9948 pci_release_regions(pdev);
9949 err_disable_device:
9950 pci_disable_device(pdev);
9951
9952 return ret;
9953 }
9954
9955 static void hclge_pci_uninit(struct hclge_dev *hdev)
9956 {
9957 struct pci_dev *pdev = hdev->pdev;
9958
9959 pcim_iounmap(pdev, hdev->hw.io_base);
9960 pci_free_irq_vectors(pdev);
9961 pci_clear_master(pdev);
9962 pci_release_mem_regions(pdev);
9963 pci_disable_device(pdev);
9964 }
9965
9966 static void hclge_state_init(struct hclge_dev *hdev)
9967 {
9968 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9969 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9970 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9971 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9972 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9973 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9974 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9975 }
9976
9977 static void hclge_state_uninit(struct hclge_dev *hdev)
9978 {
9979 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9980 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9981
9982 if (hdev->reset_timer.function)
9983 del_timer_sync(&hdev->reset_timer);
9984 if (hdev->service_task.work.func)
9985 cancel_delayed_work_sync(&hdev->service_task);
9986 }
9987
9988 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9989 {
9990 #define HCLGE_FLR_RETRY_WAIT_MS 500
9991 #define HCLGE_FLR_RETRY_CNT 5
9992
9993 struct hclge_dev *hdev = ae_dev->priv;
9994 int retry_cnt = 0;
9995 int ret;
9996
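/* When only the prepare step fails, it is retried up to HCLGE_FLR_RETRY_CNT
 * (5) times with HCLGE_FLR_RETRY_WAIT_MS (500 ms) between attempts; a
 * pending reset keeps the retry loop going until it clears.
 */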
9997 retry:
9998 down(&hdev->reset_sem);
9999 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10000 hdev->reset_type = HNAE3_FLR_RESET;
10001 ret = hclge_reset_prepare(hdev);
10002 if (ret || hdev->reset_pending) {
10003 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10004 ret);
10005 if (hdev->reset_pending ||
10006 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10007 dev_err(&hdev->pdev->dev,
10008 "reset_pending:0x%lx, retry_cnt:%d\n",
10009 hdev->reset_pending, retry_cnt);
10010 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10011 up(&hdev->reset_sem);
10012 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10013 goto retry;
10014 }
10015 }
10016
10017 /* disable misc vector before FLR done */
10018 hclge_enable_vector(&hdev->misc_vector, false);
10019 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10020 hdev->rst_stats.flr_rst_cnt++;
10021 }
10022
10023 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10024 {
10025 struct hclge_dev *hdev = ae_dev->priv;
10026 int ret;
10027
10028 hclge_enable_vector(&hdev->misc_vector, true);
10029
10030 ret = hclge_reset_rebuild(hdev);
10031 if (ret)
10032 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10033
10034 hdev->reset_type = HNAE3_NONE_RESET;
10035 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10036 up(&hdev->reset_sem);
10037 }
10038
10039 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10040 {
10041 u16 i;
10042
10043 for (i = 0; i < hdev->num_alloc_vport; i++) {
10044 struct hclge_vport *vport = &hdev->vport[i];
10045 int ret;
10046
10047 /* Send cmd to clear VF's FUNC_RST_ING */
10048 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10049 if (ret)
10050 dev_warn(&hdev->pdev->dev,
10051 "clear vf(%u) rst failed %d!\n",
10052 vport->vport_id, ret);
10053 }
10054 }
10055
10056 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10057 {
10058 struct hclge_desc desc;
10059 int ret;
10060
10061 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10062
10063 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10064 /* This new command is only supported by new firmware; it will
10065 * fail with older firmware. The error value -EOPNOTSUPP can only be
10066 * returned by older firmware running this command, so to keep the
10067 * code backward compatible we override this value and return
10068 * success.
10069 */
10070 if (ret && ret != -EOPNOTSUPP) {
10071 dev_err(&hdev->pdev->dev,
10072 "failed to clear hw resource, ret = %d\n", ret);
10073 return ret;
10074 }
10075 return 0;
10076 }
10077
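/* Main PF initialization path: bring up PCI and the firmware command
 * queue, query capabilities and device specs, then configure MSI/MSI-X,
 * TQPs/vports, MAC, VLAN, TM, RSS and the flow director before arming
 * the reset timer and the service task.
 */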
10078 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10079 {
10080 struct pci_dev *pdev = ae_dev->pdev;
10081 struct hclge_dev *hdev;
10082 int ret;
10083
10084 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10085 if (!hdev)
10086 return -ENOMEM;
10087
10088 hdev->pdev = pdev;
10089 hdev->ae_dev = ae_dev;
10090 hdev->reset_type = HNAE3_NONE_RESET;
10091 hdev->reset_level = HNAE3_FUNC_RESET;
10092 ae_dev->priv = hdev;
10093
10094 /* HW supports 2 layers of vlan tags */
10095 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10096
10097 mutex_init(&hdev->vport_lock);
10098 spin_lock_init(&hdev->fd_rule_lock);
10099 sema_init(&hdev->reset_sem, 1);
10100
10101 ret = hclge_pci_init(hdev);
10102 if (ret)
10103 goto out;
10104
10105 /* Firmware command queue initialize */
10106 ret = hclge_cmd_queue_init(hdev);
10107 if (ret)
10108 goto err_pci_uninit;
10109
10110 /* Firmware command initialize */
10111 ret = hclge_cmd_init(hdev);
10112 if (ret)
10113 goto err_cmd_uninit;
10114
10115 ret = hclge_clear_hw_resource(hdev);
10116 if (ret)
10117 goto err_cmd_uninit;
10118
10119 ret = hclge_get_cap(hdev);
10120 if (ret)
10121 goto err_cmd_uninit;
10122
10123 ret = hclge_query_dev_specs(hdev);
10124 if (ret) {
10125 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10126 ret);
10127 goto err_cmd_uninit;
10128 }
10129
10130 ret = hclge_configure(hdev);
10131 if (ret) {
10132 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10133 goto err_cmd_uninit;
10134 }
10135
10136 ret = hclge_init_msi(hdev);
10137 if (ret) {
10138 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10139 goto err_cmd_uninit;
10140 }
10141
10142 ret = hclge_misc_irq_init(hdev);
10143 if (ret)
10144 goto err_msi_uninit;
10145
10146 ret = hclge_alloc_tqps(hdev);
10147 if (ret) {
10148 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10149 goto err_msi_irq_uninit;
10150 }
10151
10152 ret = hclge_alloc_vport(hdev);
10153 if (ret)
10154 goto err_msi_irq_uninit;
10155
10156 ret = hclge_map_tqp(hdev);
10157 if (ret)
10158 goto err_msi_irq_uninit;
10159
10160 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10161 ret = hclge_mac_mdio_config(hdev);
10162 if (ret)
10163 goto err_msi_irq_uninit;
10164 }
10165
10166 ret = hclge_init_umv_space(hdev);
10167 if (ret)
10168 goto err_mdiobus_unreg;
10169
10170 ret = hclge_mac_init(hdev);
10171 if (ret) {
10172 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10173 goto err_mdiobus_unreg;
10174 }
10175
10176 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10177 if (ret) {
10178 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10179 goto err_mdiobus_unreg;
10180 }
10181
10182 ret = hclge_config_gro(hdev, true);
10183 if (ret)
10184 goto err_mdiobus_unreg;
10185
10186 ret = hclge_init_vlan_config(hdev);
10187 if (ret) {
10188 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10189 goto err_mdiobus_unreg;
10190 }
10191
10192 ret = hclge_tm_schd_init(hdev);
10193 if (ret) {
10194 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10195 goto err_mdiobus_unreg;
10196 }
10197
10198 hclge_rss_init_cfg(hdev);
10199 ret = hclge_rss_init_hw(hdev);
10200 if (ret) {
10201 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10202 goto err_mdiobus_unreg;
10203 }
10204
10205 ret = init_mgr_tbl(hdev);
10206 if (ret) {
10207 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10208 goto err_mdiobus_unreg;
10209 }
10210
10211 ret = hclge_init_fd_config(hdev);
10212 if (ret) {
10213 dev_err(&pdev->dev,
10214 "fd table init fail, ret=%d\n", ret);
10215 goto err_mdiobus_unreg;
10216 }
10217
10218 INIT_KFIFO(hdev->mac_tnl_log);
10219
10220 hclge_dcb_ops_set(hdev);
10221
10222 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10223 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10224
10225 /* Set up affinity after the service timer setup, because add_timer_on
10226 * is called from the affinity notify callback.
10227 */
10228 hclge_misc_affinity_setup(hdev);
10229
10230 hclge_clear_all_event_cause(hdev);
10231 hclge_clear_resetting_state(hdev);
10232
10233 /* Log and clear the hw errors that have already occurred */
10234 hclge_handle_all_hns_hw_errors(ae_dev);
10235
10236 /* request a delayed reset for error recovery, because an immediate
10237 * global reset on a PF could affect pending initialization on other PFs
10238 */
10239 if (ae_dev->hw_err_reset_req) {
10240 enum hnae3_reset_type reset_level;
10241
10242 reset_level = hclge_get_reset_level(ae_dev,
10243 &ae_dev->hw_err_reset_req);
10244 hclge_set_def_reset_request(ae_dev, reset_level);
10245 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10246 }
10247
10248 /* Enable MISC vector(vector0) */
10249 hclge_enable_vector(&hdev->misc_vector, true);
10250
10251 hclge_state_init(hdev);
10252 hdev->last_reset_time = jiffies;
10253
10254 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10255 HCLGE_DRIVER_NAME);
10256
10257 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10258
10259 return 0;
10260
10261 err_mdiobus_unreg:
10262 if (hdev->hw.mac.phydev)
10263 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10264 err_msi_irq_uninit:
10265 hclge_misc_irq_uninit(hdev);
10266 err_msi_uninit:
10267 pci_free_irq_vectors(pdev);
10268 err_cmd_uninit:
10269 hclge_cmd_uninit(hdev);
10270 err_pci_uninit:
10271 pcim_iounmap(pdev, hdev->hw.io_base);
10272 pci_clear_master(pdev);
10273 pci_release_regions(pdev);
10274 pci_disable_device(pdev);
10275 out:
10276 mutex_destroy(&hdev->vport_lock);
10277 return ret;
10278 }
10279
10280 static void hclge_stats_clear(struct hclge_dev *hdev)
10281 {
10282 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10283 }
10284
10285 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10286 {
10287 return hclge_config_switch_param(hdev, vf, enable,
10288 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10289 }
10290
10291 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10292 {
10293 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10294 HCLGE_FILTER_FE_NIC_INGRESS_B,
10295 enable, vf);
10296 }
10297
10298 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10299 {
10300 int ret;
10301
10302 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10303 if (ret) {
10304 dev_err(&hdev->pdev->dev,
10305 "Set vf %d mac spoof check %s failed, ret=%d\n",
10306 vf, enable ? "on" : "off", ret);
10307 return ret;
10308 }
10309
10310 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10311 if (ret)
10312 dev_err(&hdev->pdev->dev,
10313 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10314 vf, enable ? "on" : "off", ret);
10315
10316 return ret;
10317 }
10318
10319 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10320 bool enable)
10321 {
10322 struct hclge_vport *vport = hclge_get_vport(handle);
10323 struct hclge_dev *hdev = vport->back;
10324 u32 new_spoofchk = enable ? 1 : 0;
10325 int ret;
10326
10327 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10328 return -EOPNOTSUPP;
10329
10330 vport = hclge_get_vf_vport(hdev, vf);
10331 if (!vport)
10332 return -EINVAL;
10333
10334 if (vport->vf_info.spoofchk == new_spoofchk)
10335 return 0;
10336
10337 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10338 dev_warn(&hdev->pdev->dev,
10339 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10340 vf);
10341 else if (enable && hclge_is_umv_space_full(vport, true))
10342 dev_warn(&hdev->pdev->dev,
10343 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10344 vf);
10345
10346 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10347 if (ret)
10348 return ret;
10349
10350 vport->vf_info.spoofchk = new_spoofchk;
10351 return 0;
10352 }
10353
10354 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10355 {
10356 struct hclge_vport *vport = hdev->vport;
10357 int ret;
10358 int i;
10359
10360 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10361 return 0;
10362
10363 /* resume the vf spoof check state after reset */
10364 for (i = 0; i < hdev->num_alloc_vport; i++) {
10365 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10366 vport->vf_info.spoofchk);
10367 if (ret)
10368 return ret;
10369
10370 vport++;
10371 }
10372
10373 return 0;
10374 }
10375
10376 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10377 {
10378 struct hclge_vport *vport = hclge_get_vport(handle);
10379 struct hclge_dev *hdev = vport->back;
10380 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10381 u32 new_trusted = enable ? 1 : 0;
10382 bool en_bc_pmc;
10383 int ret;
10384
10385 vport = hclge_get_vf_vport(hdev, vf);
10386 if (!vport)
10387 return -EINVAL;
10388
10389 if (vport->vf_info.trusted == new_trusted)
10390 return 0;
10391
10392 /* Disable promisc mode for VF if it is not trusted any more. */
10393 if (!enable && vport->vf_info.promisc_enable) {
10394 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10395 ret = hclge_set_vport_promisc_mode(vport, false, false,
10396 en_bc_pmc);
10397 if (ret)
10398 return ret;
10399 vport->vf_info.promisc_enable = 0;
10400 hclge_inform_vf_promisc_info(vport);
10401 }
10402
10403 vport->vf_info.trusted = new_trusted;
10404
10405 return 0;
10406 }
10407
10408 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10409 {
10410 int ret;
10411 int vf;
10412
10413 /* reset vf rate to default value */
10414 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10415 struct hclge_vport *vport = &hdev->vport[vf];
10416
10417 vport->vf_info.max_tx_rate = 0;
10418 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10419 if (ret)
10420 dev_err(&hdev->pdev->dev,
10421 "vf%d failed to reset to default, ret=%d\n",
10422 vf - HCLGE_VF_VPORT_START_NUM, ret);
10423 }
10424 }
10425
10426 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10427 int min_tx_rate, int max_tx_rate)
10428 {
10429 if (min_tx_rate != 0 ||
10430 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10431 dev_err(&hdev->pdev->dev,
10432 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10433 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10434 return -EINVAL;
10435 }
10436
10437 return 0;
10438 }
10439
10440 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10441 int min_tx_rate, int max_tx_rate, bool force)
10442 {
10443 struct hclge_vport *vport = hclge_get_vport(handle);
10444 struct hclge_dev *hdev = vport->back;
10445 int ret;
10446
10447 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10448 if (ret)
10449 return ret;
10450
10451 vport = hclge_get_vf_vport(hdev, vf);
10452 if (!vport)
10453 return -EINVAL;
10454
10455 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10456 return 0;
10457
10458 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10459 if (ret)
10460 return ret;
10461
10462 vport->vf_info.max_tx_rate = max_tx_rate;
10463
10464 return 0;
10465 }
10466
10467 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10468 {
10469 struct hnae3_handle *handle = &hdev->vport->nic;
10470 struct hclge_vport *vport;
10471 int ret;
10472 int vf;
10473
10474 /* resume the vf max_tx_rate after reset */
10475 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10476 vport = hclge_get_vf_vport(hdev, vf);
10477 if (!vport)
10478 return -EINVAL;
10479
10480 /* zero means max rate; after reset, the firmware has already set it
10481 * to the max rate, so just continue.
10482 */
10483 if (!vport->vf_info.max_tx_rate)
10484 continue;
10485
10486 ret = hclge_set_vf_rate(handle, vf, 0,
10487 vport->vf_info.max_tx_rate, true);
10488 if (ret) {
10489 dev_err(&hdev->pdev->dev,
10490 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10491 vf, vport->vf_info.max_tx_rate, ret);
10492 return ret;
10493 }
10494 }
10495
10496 return 0;
10497 }
10498
10499 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10500 {
10501 struct hclge_vport *vport = hdev->vport;
10502 int i;
10503
10504 for (i = 0; i < hdev->num_alloc_vport; i++) {
10505 hclge_vport_stop(vport);
10506 vport++;
10507 }
10508 }
10509
10510 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10511 {
10512 struct hclge_dev *hdev = ae_dev->priv;
10513 struct pci_dev *pdev = ae_dev->pdev;
10514 int ret;
10515
10516 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10517
10518 hclge_stats_clear(hdev);
10519 /* NOTE: a pf reset doesn't need to clear or restore the pf and vf
10520 * table entries, so the tables in memory should not be cleaned here.
10521 */
10522 if (hdev->reset_type == HNAE3_IMP_RESET ||
10523 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10524 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10525 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10526 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10527 hclge_reset_umv_space(hdev);
10528 }
10529
10530 ret = hclge_cmd_init(hdev);
10531 if (ret) {
10532 dev_err(&pdev->dev, "Cmd queue init failed\n");
10533 return ret;
10534 }
10535
10536 ret = hclge_map_tqp(hdev);
10537 if (ret) {
10538 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10539 return ret;
10540 }
10541
10542 ret = hclge_mac_init(hdev);
10543 if (ret) {
10544 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10545 return ret;
10546 }
10547
10548 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10549 if (ret) {
10550 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10551 return ret;
10552 }
10553
10554 ret = hclge_config_gro(hdev, true);
10555 if (ret)
10556 return ret;
10557
10558 ret = hclge_init_vlan_config(hdev);
10559 if (ret) {
10560 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10561 return ret;
10562 }
10563
10564 ret = hclge_tm_init_hw(hdev, true);
10565 if (ret) {
10566 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10567 return ret;
10568 }
10569
10570 ret = hclge_rss_init_hw(hdev);
10571 if (ret) {
10572 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10573 return ret;
10574 }
10575
10576 ret = init_mgr_tbl(hdev);
10577 if (ret) {
10578 dev_err(&pdev->dev,
10579 "failed to reinit manager table, ret = %d\n", ret);
10580 return ret;
10581 }
10582
10583 ret = hclge_init_fd_config(hdev);
10584 if (ret) {
10585 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10586 return ret;
10587 }
10588
10589 /* Log and clear the hw errors that have already occurred */
10590 hclge_handle_all_hns_hw_errors(ae_dev);
10591
10592 /* Re-enable the hw error interrupts because
10593  * they are disabled during a global reset.
10594  */
10595 ret = hclge_config_nic_hw_error(hdev, true);
10596 if (ret) {
10597 dev_err(&pdev->dev,
10598 "fail(%d) to re-enable NIC hw error interrupts\n",
10599 ret);
10600 return ret;
10601 }
10602
10603 if (hdev->roce_client) {
10604 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10605 if (ret) {
10606 dev_err(&pdev->dev,
10607 "fail(%d) to re-enable roce ras interrupts\n",
10608 ret);
10609 return ret;
10610 }
10611 }
10612
10613 hclge_reset_vport_state(hdev);
10614 ret = hclge_reset_vport_spoofchk(hdev);
10615 if (ret)
10616 return ret;
10617
10618 ret = hclge_resume_vf_rate(hdev);
10619 if (ret)
10620 return ret;
10621
10622 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10623 HCLGE_DRIVER_NAME);
10624
10625 return 0;
10626 }
10627
10628 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10629 {
10630 struct hclge_dev *hdev = ae_dev->priv;
10631 struct hclge_mac *mac = &hdev->hw.mac;
10632
10633 hclge_reset_vf_rate(hdev);
10634 hclge_clear_vf_vlan(hdev);
10635 hclge_misc_affinity_teardown(hdev);
10636 hclge_state_uninit(hdev);
10637 hclge_uninit_mac_table(hdev);
10638
10639 if (mac->phydev)
10640 mdiobus_unregister(mac->mdio_bus);
10641
10642 /* Disable MISC vector(vector0) */
10643 hclge_enable_vector(&hdev->misc_vector, false);
10644 synchronize_irq(hdev->misc_vector.vector_irq);
10645
10646 /* Disable all hw interrupts */
10647 hclge_config_mac_tnl_int(hdev, false);
10648 hclge_config_nic_hw_error(hdev, false);
10649 hclge_config_rocee_ras_interrupt(hdev, false);
10650
10651 hclge_cmd_uninit(hdev);
10652 hclge_misc_irq_uninit(hdev);
10653 hclge_pci_uninit(hdev);
10654 mutex_destroy(&hdev->vport_lock);
10655 hclge_uninit_vport_vlan_table(hdev);
10656 ae_dev->priv = NULL;
10657 }
10658
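/* the maximum channel count is bounded by the RSS size limit and by the TQPs available per TC */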
10659 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10660 {
10661 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10662 struct hclge_vport *vport = hclge_get_vport(handle);
10663 struct hclge_dev *hdev = vport->back;
10664
10665 return min_t(u32, hdev->rss_size_max,
10666 vport->alloc_tqps / kinfo->num_tc);
10667 }
10668
10669 static void hclge_get_channels(struct hnae3_handle *handle,
10670 struct ethtool_channels *ch)
10671 {
10672 ch->max_combined = hclge_get_max_channels(handle);
10673 ch->other_count = 1;
10674 ch->max_other = 1;
10675 ch->combined_count = handle->kinfo.rss_size;
10676 }
10677
10678 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10679 u16 *alloc_tqps, u16 *max_rss_size)
10680 {
10681 struct hclge_vport *vport = hclge_get_vport(handle);
10682 struct hclge_dev *hdev = vport->back;
10683
10684 *alloc_tqps = vport->alloc_tqps;
10685 *max_rss_size = hdev->rss_size_max;
10686 }
10687
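/* change the queue count: update the TM vport mapping, the RSS TC mode and, unless the user has configured it, the RSS indirection table */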
10688 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10689 bool rxfh_configured)
10690 {
10691 struct hclge_vport *vport = hclge_get_vport(handle);
10692 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10693 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10694 struct hclge_dev *hdev = vport->back;
10695 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10696 u16 cur_rss_size = kinfo->rss_size;
10697 u16 cur_tqps = kinfo->num_tqps;
10698 u16 tc_valid[HCLGE_MAX_TC_NUM];
10699 u16 roundup_size;
10700 u32 *rss_indir;
10701 unsigned int i;
10702 int ret;
10703
10704 kinfo->req_rss_size = new_tqps_num;
10705
10706 ret = hclge_tm_vport_map_update(hdev);
10707 if (ret) {
10708 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10709 return ret;
10710 }
10711
10712 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10713 roundup_size = ilog2(roundup_size);
10714 /* Set the RSS TC mode according to the new RSS size */
10715 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10716 tc_valid[i] = 0;
10717
10718 if (!(hdev->hw_tc_map & BIT(i)))
10719 continue;
10720
10721 tc_valid[i] = 1;
10722 tc_size[i] = roundup_size;
10723 tc_offset[i] = kinfo->rss_size * i;
10724 }
10725 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10726 if (ret)
10727 return ret;
10728
10729 /* RSS indirection table has been configured by user */
10730 if (rxfh_configured)
10731 goto out;
10732
10733 /* Reinitialize the RSS indirection table according to the new RSS size */
10734 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10735 if (!rss_indir)
10736 return -ENOMEM;
10737
10738 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10739 rss_indir[i] = i % kinfo->rss_size;
10740
10741 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10742 if (ret)
10743 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10744 ret);
10745
10746 kfree(rss_indir);
10747
10748 out:
10749 if (!ret)
10750 dev_info(&hdev->pdev->dev,
10751 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10752 cur_rss_size, kinfo->rss_size,
10753 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10754
10755 return ret;
10756 }
10757
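/* query the firmware for the number of 32-bit and 64-bit registers that can be dumped */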
10758 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10759 u32 *regs_num_64_bit)
10760 {
10761 struct hclge_desc desc;
10762 u32 total_num;
10763 int ret;
10764
10765 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10766 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10767 if (ret) {
10768 dev_err(&hdev->pdev->dev,
10769 "Query register number cmd failed, ret = %d.\n", ret);
10770 return ret;
10771 }
10772
10773 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10774 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10775
10776 total_num = *regs_num_32_bit + *regs_num_64_bit;
10777 if (!total_num)
10778 return -EINVAL;
10779
10780 return 0;
10781 }
10782
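/* read @regs_num 32-bit registers via the command queue into @data */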
10783 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10784 void *data)
10785 {
10786 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10787 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10788
10789 struct hclge_desc *desc;
10790 u32 *reg_val = data;
10791 __le32 *desc_data;
10792 int nodata_num;
10793 int cmd_num;
10794 int i, k, n;
10795 int ret;
10796
10797 if (regs_num == 0)
10798 return 0;
10799
10800 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10801 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10802 HCLGE_32_BIT_REG_RTN_DATANUM);
10803 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10804 if (!desc)
10805 return -ENOMEM;
10806
10807 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10808 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10809 if (ret) {
10810 dev_err(&hdev->pdev->dev,
10811 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10812 kfree(desc);
10813 return ret;
10814 }
10815
10816 for (i = 0; i < cmd_num; i++) {
10817 if (i == 0) {
10818 desc_data = (__le32 *)(&desc[i].data[0]);
10819 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10820 } else {
10821 desc_data = (__le32 *)(&desc[i]);
10822 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10823 }
10824 for (k = 0; k < n; k++) {
10825 *reg_val++ = le32_to_cpu(*desc_data++);
10826
10827 regs_num--;
10828 if (!regs_num)
10829 break;
10830 }
10831 }
10832
10833 kfree(desc);
10834 return 0;
10835 }
10836
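/* read @regs_num 64-bit registers via the command queue into @data */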
10837 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10838 void *data)
10839 {
10840 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10841 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10842
10843 struct hclge_desc *desc;
10844 u64 *reg_val = data;
10845 __le64 *desc_data;
10846 int nodata_len;
10847 int cmd_num;
10848 int i, k, n;
10849 int ret;
10850
10851 if (regs_num == 0)
10852 return 0;
10853
10854 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10855 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10856 HCLGE_64_BIT_REG_RTN_DATANUM);
10857 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10858 if (!desc)
10859 return -ENOMEM;
10860
10861 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10862 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10863 if (ret) {
10864 dev_err(&hdev->pdev->dev,
10865 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10866 kfree(desc);
10867 return ret;
10868 }
10869
10870 for (i = 0; i < cmd_num; i++) {
10871 if (i == 0) {
10872 desc_data = (__le64 *)(&desc[i].data[0]);
10873 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10874 } else {
10875 desc_data = (__le64 *)(&desc[i]);
10876 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10877 }
10878 for (k = 0; k < n; k++) {
10879 *reg_val++ = le64_to_cpu(*desc_data++);
10880
10881 regs_num--;
10882 if (!regs_num)
10883 break;
10884 }
10885 }
10886
10887 kfree(desc);
10888 return 0;
10889 }
10890
10891 #define MAX_SEPARATE_NUM 4
10892 #define SEPARATOR_VALUE 0xFDFCFBFA
10893 #define REG_NUM_PER_LINE 4
10894 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10895 #define REG_SEPARATOR_LINE 1
10896 #define REG_NUM_REMAIN_MASK 3
10897
10898 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10899 {
10900 int i;
10901
10902 /* initialize all command BDs except the last one */
10903 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10904 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10905 true);
10906 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10907 }
10908
10909 /* initialize the last command BD */
10910 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10911
10912 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10913 }
10914
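/* query how many command BDs each DFX register type needs and fill @bd_num_list */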
10915 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10916 int *bd_num_list,
10917 u32 type_num)
10918 {
10919 u32 entries_per_desc, desc_index, index, offset, i;
10920 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10921 int ret;
10922
10923 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10924 if (ret) {
10925 dev_err(&hdev->pdev->dev,
10926 "Get dfx bd num fail, status is %d.\n", ret);
10927 return ret;
10928 }
10929
10930 entries_per_desc = ARRAY_SIZE(desc[0].data);
10931 for (i = 0; i < type_num; i++) {
10932 offset = hclge_dfx_bd_offset_list[i];
10933 index = offset % entries_per_desc;
10934 desc_index = offset / entries_per_desc;
10935 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10936 }
10937
10938 return ret;
10939 }
10940
10941 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10942 struct hclge_desc *desc_src, int bd_num,
10943 enum hclge_opcode_type cmd)
10944 {
10945 struct hclge_desc *desc = desc_src;
10946 int i, ret;
10947
10948 hclge_cmd_setup_basic_desc(desc, cmd, true);
10949 for (i = 0; i < bd_num - 1; i++) {
10950 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10951 desc++;
10952 hclge_cmd_setup_basic_desc(desc, cmd, true);
10953 }
10954
10955 desc = desc_src;
10956 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10957 if (ret)
10958 dev_err(&hdev->pdev->dev,
10959 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10960 cmd, ret);
10961
10962 return ret;
10963 }
10964
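/* copy register values from the descriptors into @data and pad with separator words; returns the number of u32 entries written */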
10965 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10966 void *data)
10967 {
10968 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10969 struct hclge_desc *desc = desc_src;
10970 u32 *reg = data;
10971
10972 entries_per_desc = ARRAY_SIZE(desc->data);
10973 reg_num = entries_per_desc * bd_num;
10974 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10975 for (i = 0; i < reg_num; i++) {
10976 index = i % entries_per_desc;
10977 desc_index = i / entries_per_desc;
10978 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10979 }
10980 for (i = 0; i < separator_num; i++)
10981 *reg++ = SEPARATOR_VALUE;
10982
10983 return reg_num + separator_num;
10984 }
10985
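/* compute the buffer length in bytes needed for the DFX register dump */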
10986 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10987 {
10988 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10989 int data_len_per_desc, bd_num, i;
10990 int *bd_num_list;
10991 u32 data_len;
10992 int ret;
10993
10994 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
10995 if (!bd_num_list)
10996 return -ENOMEM;
10997
10998 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10999 if (ret) {
11000 dev_err(&hdev->pdev->dev,
11001 "Get dfx reg bd num fail, status is %d.\n", ret);
11002 goto out;
11003 }
11004
11005 data_len_per_desc = sizeof_field(struct hclge_desc, data);
11006 *len = 0;
11007 for (i = 0; i < dfx_reg_type_num; i++) {
11008 bd_num = bd_num_list[i];
11009 data_len = data_len_per_desc * bd_num;
11010 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11011 }
11012
11013 out:
11014 kfree(bd_num_list);
11015 return ret;
11016 }
11017
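/* dump all DFX register types into @data, one type after another */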
11018 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11019 {
11020 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11021 int bd_num, bd_num_max, buf_len, i;
11022 struct hclge_desc *desc_src;
11023 int *bd_num_list;
11024 u32 *reg = data;
11025 int ret;
11026
11027 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11028 if (!bd_num_list)
11029 return -ENOMEM;
11030
11031 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11032 if (ret) {
11033 dev_err(&hdev->pdev->dev,
11034 "Get dfx reg bd num fail, status is %d.\n", ret);
11035 goto out;
11036 }
11037
11038 bd_num_max = bd_num_list[0];
11039 for (i = 1; i < dfx_reg_type_num; i++)
11040 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11041
11042 buf_len = sizeof(*desc_src) * bd_num_max;
11043 desc_src = kzalloc(buf_len, GFP_KERNEL);
11044 if (!desc_src) {
11045 ret = -ENOMEM;
11046 goto out;
11047 }
11048
11049 for (i = 0; i < dfx_reg_type_num; i++) {
11050 bd_num = bd_num_list[i];
11051 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11052 hclge_dfx_reg_opcode_list[i]);
11053 if (ret) {
11054 dev_err(&hdev->pdev->dev,
11055 "Get dfx reg fail, status is %d.\n", ret);
11056 break;
11057 }
11058
11059 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11060 }
11061
11062 kfree(desc_src);
11063 out:
11064 kfree(bd_num_list);
11065 return ret;
11066 }
11067
11068 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11069 struct hnae3_knic_private_info *kinfo)
11070 {
11071 #define HCLGE_RING_REG_OFFSET 0x200
11072 #define HCLGE_RING_INT_REG_OFFSET 0x4
11073
11074 int i, j, reg_num, separator_num;
11075 int data_num_sum;
11076 u32 *reg = data;
11077
11078 /* fetch per-PF register values from the PF PCIe register space */
11079 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11080 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11081 for (i = 0; i < reg_num; i++)
11082 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11083 for (i = 0; i < separator_num; i++)
11084 *reg++ = SEPARATOR_VALUE;
11085 data_num_sum = reg_num + separator_num;
11086
11087 reg_num = ARRAY_SIZE(common_reg_addr_list);
11088 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11089 for (i = 0; i < reg_num; i++)
11090 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11091 for (i = 0; i < separator_num; i++)
11092 *reg++ = SEPARATOR_VALUE;
11093 data_num_sum += reg_num + separator_num;
11094
11095 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11096 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11097 for (j = 0; j < kinfo->num_tqps; j++) {
11098 for (i = 0; i < reg_num; i++)
11099 *reg++ = hclge_read_dev(&hdev->hw,
11100 ring_reg_addr_list[i] +
11101 HCLGE_RING_REG_OFFSET * j);
11102 for (i = 0; i < separator_num; i++)
11103 *reg++ = SEPARATOR_VALUE;
11104 }
11105 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11106
11107 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11108 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11109 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11110 for (i = 0; i < reg_num; i++)
11111 *reg++ = hclge_read_dev(&hdev->hw,
11112 tqp_intr_reg_addr_list[i] +
11113 HCLGE_RING_INT_REG_OFFSET * j);
11114 for (i = 0; i < separator_num; i++)
11115 *reg++ = SEPARATOR_VALUE;
11116 }
11117 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11118
11119 return data_num_sum;
11120 }
11121
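/* total size in bytes of the register dump returned by hclge_get_regs() */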
11122 static int hclge_get_regs_len(struct hnae3_handle *handle)
11123 {
11124 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11125 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11126 struct hclge_vport *vport = hclge_get_vport(handle);
11127 struct hclge_dev *hdev = vport->back;
11128 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11129 int regs_lines_32_bit, regs_lines_64_bit;
11130 int ret;
11131
11132 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11133 if (ret) {
11134 dev_err(&hdev->pdev->dev,
11135 "Get register number failed, ret = %d.\n", ret);
11136 return ret;
11137 }
11138
11139 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11140 if (ret) {
11141 dev_err(&hdev->pdev->dev,
11142 "Get dfx reg len failed, ret = %d.\n", ret);
11143 return ret;
11144 }
11145
11146 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11147 REG_SEPARATOR_LINE;
11148 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11149 REG_SEPARATOR_LINE;
11150 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11151 REG_SEPARATOR_LINE;
11152 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11153 REG_SEPARATOR_LINE;
11154 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11155 REG_SEPARATOR_LINE;
11156 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11157 REG_SEPARATOR_LINE;
11158
11159 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11160 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11161 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11162 }
11163
11164 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11165 void *data)
11166 {
11167 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11168 struct hclge_vport *vport = hclge_get_vport(handle);
11169 struct hclge_dev *hdev = vport->back;
11170 u32 regs_num_32_bit, regs_num_64_bit;
11171 int i, reg_num, separator_num, ret;
11172 u32 *reg = data;
11173
11174 *version = hdev->fw_version;
11175
11176 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11177 if (ret) {
11178 dev_err(&hdev->pdev->dev,
11179 "Get register number failed, ret = %d.\n", ret);
11180 return;
11181 }
11182
11183 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11184
11185 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11186 if (ret) {
11187 dev_err(&hdev->pdev->dev,
11188 "Get 32 bit register failed, ret = %d.\n", ret);
11189 return;
11190 }
11191 reg_num = regs_num_32_bit;
11192 reg += reg_num;
11193 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11194 for (i = 0; i < separator_num; i++)
11195 *reg++ = SEPARATOR_VALUE;
11196
11197 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11198 if (ret) {
11199 dev_err(&hdev->pdev->dev,
11200 "Get 64 bit register failed, ret = %d.\n", ret);
11201 return;
11202 }
11203 reg_num = regs_num_64_bit * 2;
11204 reg += reg_num;
11205 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11206 for (i = 0; i < separator_num; i++)
11207 *reg++ = SEPARATOR_VALUE;
11208
11209 ret = hclge_get_dfx_reg(hdev, reg);
11210 if (ret)
11211 dev_err(&hdev->pdev->dev,
11212 "Get dfx register failed, ret = %d.\n", ret);
11213 }
11214
11215 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11216 {
11217 struct hclge_set_led_state_cmd *req;
11218 struct hclge_desc desc;
11219 int ret;
11220
11221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11222
11223 req = (struct hclge_set_led_state_cmd *)desc.data;
11224 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11225 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11226
11227 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11228 if (ret)
11229 dev_err(&hdev->pdev->dev,
11230 "Send set led state cmd error, ret =%d\n", ret);
11231
11232 return ret;
11233 }
11234
11235 enum hclge_led_status {
11236 HCLGE_LED_OFF,
11237 HCLGE_LED_ON,
11238 HCLGE_LED_NO_CHANGE = 0xFF,
11239 };
11240
11241 static int hclge_set_led_id(struct hnae3_handle *handle,
11242 enum ethtool_phys_id_state status)
11243 {
11244 struct hclge_vport *vport = hclge_get_vport(handle);
11245 struct hclge_dev *hdev = vport->back;
11246
11247 switch (status) {
11248 case ETHTOOL_ID_ACTIVE:
11249 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11250 case ETHTOOL_ID_INACTIVE:
11251 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11252 default:
11253 return -EINVAL;
11254 }
11255 }
11256
11257 static void hclge_get_link_mode(struct hnae3_handle *handle,
11258 unsigned long *supported,
11259 unsigned long *advertising)
11260 {
11261 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11262 struct hclge_vport *vport = hclge_get_vport(handle);
11263 struct hclge_dev *hdev = vport->back;
11264 unsigned int idx = 0;
11265
11266 for (; idx < size; idx++) {
11267 supported[idx] = hdev->hw.mac.supported[idx];
11268 advertising[idx] = hdev->hw.mac.advertising[idx];
11269 }
11270 }
11271
11272 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11273 {
11274 struct hclge_vport *vport = hclge_get_vport(handle);
11275 struct hclge_dev *hdev = vport->back;
11276
11277 return hclge_config_gro(hdev, enable);
11278 }
11279
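/* apply a pending promiscuous mode change on the PF and refresh the VLAN filter state to match */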
11280 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11281 {
11282 struct hclge_vport *vport = &hdev->vport[0];
11283 struct hnae3_handle *handle = &vport->nic;
11284 u8 tmp_flags;
11285 int ret;
11286
11287 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11288 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11289 vport->last_promisc_flags = vport->overflow_promisc_flags;
11290 }
11291
11292 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11293 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11294 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11295 tmp_flags & HNAE3_MPE);
11296 if (!ret) {
11297 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11298 hclge_enable_vlan_filter(handle,
11299 tmp_flags & HNAE3_VLAN_FLTR);
11300 }
11301 }
11302 }
11303
11304 static bool hclge_module_existed(struct hclge_dev *hdev)
11305 {
11306 struct hclge_desc desc;
11307 u32 existed;
11308 int ret;
11309
11310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11311 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11312 if (ret) {
11313 dev_err(&hdev->pdev->dev,
11314 "failed to get SFP exist state, ret = %d\n", ret);
11315 return false;
11316 }
11317
11318 existed = le32_to_cpu(desc.data[0]);
11319
11320 return existed != 0;
11321 }
11322
11323 /* one read needs 6 BDs (140 bytes in total).
11324  * return the number of bytes actually read; 0 means the read failed.
11325  */
11326 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11327 u32 len, u8 *data)
11328 {
11329 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11330 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11331 u16 read_len;
11332 u16 copy_len;
11333 int ret;
11334 int i;
11335
11336 /* setup all 6 bds to read module eeprom info. */
11337 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11338 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11339 true);
11340
11341 /* bd0~bd4 need next flag */
11342 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11343 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11344 }
11345
11346 /* setup bd0, this bd contains offset and read length. */
11347 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11348 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11349 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11350 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11351
11352 ret = hclge_cmd_send(&hdev->hw, desc, i);
11353 if (ret) {
11354 dev_err(&hdev->pdev->dev,
11355 "failed to get SFP eeprom info, ret = %d\n", ret);
11356 return 0;
11357 }
11358
11359 /* copy sfp info from bd0 to out buffer. */
11360 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11361 memcpy(data, sfp_info_bd0->data, copy_len);
11362 read_len = copy_len;
11363
11364 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11365 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11366 if (read_len >= len)
11367 return read_len;
11368
11369 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11370 memcpy(data + read_len, desc[i].data, copy_len);
11371 read_len += copy_len;
11372 }
11373
11374 return read_len;
11375 }
11376
11377 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11378 u32 len, u8 *data)
11379 {
11380 struct hclge_vport *vport = hclge_get_vport(handle);
11381 struct hclge_dev *hdev = vport->back;
11382 u32 read_len = 0;
11383 u16 data_len;
11384
11385 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11386 return -EOPNOTSUPP;
11387
11388 if (!hclge_module_existed(hdev))
11389 return -ENXIO;
11390
11391 while (read_len < len) {
11392 data_len = hclge_get_sfp_eeprom_info(hdev,
11393 offset + read_len,
11394 len - read_len,
11395 data + read_len);
11396 if (!data_len)
11397 return -EIO;
11398
11399 read_len += data_len;
11400 }
11401
11402 return 0;
11403 }
11404
11405 static const struct hnae3_ae_ops hclge_ops = {
11406 .init_ae_dev = hclge_init_ae_dev,
11407 .uninit_ae_dev = hclge_uninit_ae_dev,
11408 .flr_prepare = hclge_flr_prepare,
11409 .flr_done = hclge_flr_done,
11410 .init_client_instance = hclge_init_client_instance,
11411 .uninit_client_instance = hclge_uninit_client_instance,
11412 .map_ring_to_vector = hclge_map_ring_to_vector,
11413 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11414 .get_vector = hclge_get_vector,
11415 .put_vector = hclge_put_vector,
11416 .set_promisc_mode = hclge_set_promisc_mode,
11417 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11418 .set_loopback = hclge_set_loopback,
11419 .start = hclge_ae_start,
11420 .stop = hclge_ae_stop,
11421 .client_start = hclge_client_start,
11422 .client_stop = hclge_client_stop,
11423 .get_status = hclge_get_status,
11424 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11425 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11426 .get_media_type = hclge_get_media_type,
11427 .check_port_speed = hclge_check_port_speed,
11428 .get_fec = hclge_get_fec,
11429 .set_fec = hclge_set_fec,
11430 .get_rss_key_size = hclge_get_rss_key_size,
11431 .get_rss_indir_size = hclge_get_rss_indir_size,
11432 .get_rss = hclge_get_rss,
11433 .set_rss = hclge_set_rss,
11434 .set_rss_tuple = hclge_set_rss_tuple,
11435 .get_rss_tuple = hclge_get_rss_tuple,
11436 .get_tc_size = hclge_get_tc_size,
11437 .get_mac_addr = hclge_get_mac_addr,
11438 .set_mac_addr = hclge_set_mac_addr,
11439 .do_ioctl = hclge_do_ioctl,
11440 .add_uc_addr = hclge_add_uc_addr,
11441 .rm_uc_addr = hclge_rm_uc_addr,
11442 .add_mc_addr = hclge_add_mc_addr,
11443 .rm_mc_addr = hclge_rm_mc_addr,
11444 .set_autoneg = hclge_set_autoneg,
11445 .get_autoneg = hclge_get_autoneg,
11446 .restart_autoneg = hclge_restart_autoneg,
11447 .halt_autoneg = hclge_halt_autoneg,
11448 .get_pauseparam = hclge_get_pauseparam,
11449 .set_pauseparam = hclge_set_pauseparam,
11450 .set_mtu = hclge_set_mtu,
11451 .reset_queue = hclge_reset_tqp,
11452 .get_stats = hclge_get_stats,
11453 .get_mac_stats = hclge_get_mac_stat,
11454 .update_stats = hclge_update_stats,
11455 .get_strings = hclge_get_strings,
11456 .get_sset_count = hclge_get_sset_count,
11457 .get_fw_version = hclge_get_fw_version,
11458 .get_mdix_mode = hclge_get_mdix_mode,
11459 .enable_vlan_filter = hclge_enable_vlan_filter,
11460 .set_vlan_filter = hclge_set_vlan_filter,
11461 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11462 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11463 .reset_event = hclge_reset_event,
11464 .get_reset_level = hclge_get_reset_level,
11465 .set_default_reset_request = hclge_set_def_reset_request,
11466 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11467 .set_channels = hclge_set_channels,
11468 .get_channels = hclge_get_channels,
11469 .get_regs_len = hclge_get_regs_len,
11470 .get_regs = hclge_get_regs,
11471 .set_led_id = hclge_set_led_id,
11472 .get_link_mode = hclge_get_link_mode,
11473 .add_fd_entry = hclge_add_fd_entry,
11474 .del_fd_entry = hclge_del_fd_entry,
11475 .del_all_fd_entries = hclge_del_all_fd_entries,
11476 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11477 .get_fd_rule_info = hclge_get_fd_rule_info,
11478 .get_fd_all_rules = hclge_get_all_rules,
11479 .enable_fd = hclge_enable_fd,
11480 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11481 .dbg_run_cmd = hclge_dbg_run_cmd,
11482 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11483 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11484 .ae_dev_resetting = hclge_ae_dev_resetting,
11485 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11486 .set_gro_en = hclge_gro_en,
11487 .get_global_queue_id = hclge_covert_handle_qid_global,
11488 .set_timer_task = hclge_set_timer_task,
11489 .mac_connect_phy = hclge_mac_connect_phy,
11490 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11491 .get_vf_config = hclge_get_vf_config,
11492 .set_vf_link_state = hclge_set_vf_link_state,
11493 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11494 .set_vf_trust = hclge_set_vf_trust,
11495 .set_vf_rate = hclge_set_vf_rate,
11496 .set_vf_mac = hclge_set_vf_mac,
11497 .get_module_eeprom = hclge_get_module_eeprom,
11498 .get_cmdq_stat = hclge_get_cmdq_stat,
11499 };
11500
11501 static struct hnae3_ae_algo ae_algo = {
11502 .ops = &hclge_ops,
11503 .pdev_id_table = ae_algo_pci_tbl,
11504 };
11505
11506 static int hclge_init(void)
11507 {
11508 pr_info("%s is initializing\n", HCLGE_NAME);
11509
11510 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11511 if (!hclge_wq) {
11512 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11513 return -ENOMEM;
11514 }
11515
11516 hnae3_register_ae_algo(&ae_algo);
11517
11518 return 0;
11519 }
11520
11521 static void hclge_exit(void)
11522 {
11523 hnae3_unregister_ae_algo_prepare(&ae_algo);
11524 hnae3_unregister_ae_algo(&ae_algo);
11525 destroy_workqueue(hclge_wq);
11526 }
11527 module_init(hclge_init);
11528 module_exit(hclge_exit);
11529
11530 MODULE_LICENSE("GPL");
11531 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11532 MODULE_DESCRIPTION("HCLGE Driver");
11533 MODULE_VERSION(HCLGE_MOD_VERSION);
11534