1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48},
389 	{ OUTER_SRC_MAC, 48},
390 	{ OUTER_VLAN_TAG_FST, 16},
391 	{ OUTER_VLAN_TAG_SEC, 16},
392 	{ OUTER_ETH_TYPE, 16},
393 	{ OUTER_L2_RSV, 16},
394 	{ OUTER_IP_TOS, 8},
395 	{ OUTER_IP_PROTO, 8},
396 	{ OUTER_SRC_IP, 32},
397 	{ OUTER_DST_IP, 32},
398 	{ OUTER_L3_RSV, 16},
399 	{ OUTER_SRC_PORT, 16},
400 	{ OUTER_DST_PORT, 16},
401 	{ OUTER_L4_RSV, 32},
402 	{ OUTER_TUN_VNI, 24},
403 	{ OUTER_TUN_FLOW_ID, 8},
404 	{ INNER_DST_MAC, 48},
405 	{ INNER_SRC_MAC, 48},
406 	{ INNER_VLAN_TAG_FST, 16},
407 	{ INNER_VLAN_TAG_SEC, 16},
408 	{ INNER_ETH_TYPE, 16},
409 	{ INNER_L2_RSV, 16},
410 	{ INNER_IP_TOS, 8},
411 	{ INNER_IP_PROTO, 8},
412 	{ INNER_SRC_IP, 32},
413 	{ INNER_DST_IP, 32},
414 	{ INNER_L3_RSV, 16},
415 	{ INNER_SRC_PORT, 16},
416 	{ INNER_DST_PORT, 16},
417 	{ INNER_L4_RSV, 32},
418 };
419 
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423 
424 	u64 *data = (u64 *)(&hdev->mac_stats);
425 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 	__le64 *desc_data;
427 	int i, k, n;
428 	int ret;
429 
430 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 	if (ret) {
433 		dev_err(&hdev->pdev->dev,
434 			"Get MAC pkt stats fail, status = %d.\n", ret);
435 
436 		return ret;
437 	}
438 
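	/* accumulate each 64-bit counter into the matching hdev->mac_stats field */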
439 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 		/* for special opcode 0032, only the first desc has the head */
441 		if (unlikely(i == 0)) {
442 			desc_data = (__le64 *)(&desc[i].data[0]);
443 			n = HCLGE_RD_FIRST_STATS_NUM;
444 		} else {
445 			desc_data = (__le64 *)(&desc[i]);
446 			n = HCLGE_RD_OTHER_STATS_NUM;
447 		}
448 
449 		for (k = 0; k < n; k++) {
450 			*data += le64_to_cpu(*desc_data);
451 			data++;
452 			desc_data++;
453 		}
454 	}
455 
456 	return 0;
457 }
458 
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 	u64 *data = (u64 *)(&hdev->mac_stats);
462 	struct hclge_desc *desc;
463 	__le64 *desc_data;
464 	u16 i, k, n;
465 	int ret;
466 
467 	/* This may be called inside atomic sections,
468 	 * so GFP_ATOMIC is more suitable here
469 	 */
470 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 	if (!desc)
472 		return -ENOMEM;
473 
474 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 	if (ret) {
477 		kfree(desc);
478 		return ret;
479 	}
480 
481 	for (i = 0; i < desc_num; i++) {
482 		/* for special opcode 0034, only the first desc has the head */
483 		if (i == 0) {
484 			desc_data = (__le64 *)(&desc[i].data[0]);
485 			n = HCLGE_RD_FIRST_STATS_NUM;
486 		} else {
487 			desc_data = (__le64 *)(&desc[i]);
488 			n = HCLGE_RD_OTHER_STATS_NUM;
489 		}
490 
491 		for (k = 0; k < n; k++) {
492 			*data += le64_to_cpu(*desc_data);
493 			data++;
494 			desc_data++;
495 		}
496 	}
497 
498 	kfree(desc);
499 
500 	return 0;
501 }
502 
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 	struct hclge_desc desc;
506 	__le32 *desc_data;
507 	u32 reg_num;
508 	int ret;
509 
510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 	if (ret)
513 		return ret;
514 
515 	desc_data = (__le32 *)(&desc.data[0]);
516 	reg_num = le32_to_cpu(*desc_data);
517 
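	/* one desc for the first 3 registers, then one per 4 remaining (rounded up) */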
518 	*desc_num = 1 + ((reg_num - 3) >> 2) +
519 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520 
521 	return 0;
522 }
523 
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 	u32 desc_num;
527 	int ret;
528 
529 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
530 
531 	/* The firmware supports the new statistics acquisition method */
532 	if (!ret)
533 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 	else if (ret == -EOPNOTSUPP)
535 		ret = hclge_mac_update_stats_defective(hdev);
536 	else
537 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538 
539 	return ret;
540 }
541 
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 	struct hclge_vport *vport = hclge_get_vport(handle);
546 	struct hclge_dev *hdev = vport->back;
547 	struct hnae3_queue *queue;
548 	struct hclge_desc desc[1];
549 	struct hclge_tqp *tqp;
550 	int ret, i;
551 
552 	for (i = 0; i < kinfo->num_tqps; i++) {
553 		queue = handle->kinfo.tqp[i];
554 		tqp = container_of(queue, struct hclge_tqp, q);
555 		/* command : HCLGE_OPC_QUERY_RX_STATS */
556 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 					   true);
558 
559 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 		if (ret) {
562 			dev_err(&hdev->pdev->dev,
563 				"Query tqp stat fail, status = %d,queue = %d\n",
564 				ret, i);
565 			return ret;
566 		}
567 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 			le32_to_cpu(desc[0].data[1]);
569 	}
570 
571 	for (i = 0; i < kinfo->num_tqps; i++) {
572 		queue = handle->kinfo.tqp[i];
573 		tqp = container_of(queue, struct hclge_tqp, q);
574 		/* command : HCLGE_OPC_QUERY_TX_STATS */
575 		hclge_cmd_setup_basic_desc(&desc[0],
576 					   HCLGE_OPC_QUERY_TX_STATS,
577 					   true);
578 
579 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 		if (ret) {
582 			dev_err(&hdev->pdev->dev,
583 				"Query tqp stat fail, status = %d,queue = %d\n",
584 				ret, i);
585 			return ret;
586 		}
587 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 			le32_to_cpu(desc[0].data[1]);
589 	}
590 
591 	return 0;
592 }
593 
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 	struct hclge_tqp *tqp;
598 	u64 *buff = data;
599 	int i;
600 
601 	for (i = 0; i < kinfo->num_tqps; i++) {
602 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 	}
605 
606 	for (i = 0; i < kinfo->num_tqps; i++) {
607 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 	}
610 
611 	return buff;
612 }
613 
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617 
618 	/* each tqp has one TX queue and one RX queue */
619 	return kinfo->num_tqps * (2);
620 }
621 
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 	u8 *buff = data;
626 	int i;
627 
628 	for (i = 0; i < kinfo->num_tqps; i++) {
629 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 			struct hclge_tqp, q);
631 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 			 tqp->index);
633 		buff = buff + ETH_GSTRING_LEN;
634 	}
635 
636 	for (i = 0; i < kinfo->num_tqps; i++) {
637 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 			struct hclge_tqp, q);
639 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 			 tqp->index);
641 		buff = buff + ETH_GSTRING_LEN;
642 	}
643 
644 	return buff;
645 }
646 
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 				 const struct hclge_comm_stats_str strs[],
649 				 int size, u64 *data)
650 {
651 	u64 *buf = data;
652 	u32 i;
653 
654 	for (i = 0; i < size; i++)
655 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656 
657 	return buf + size;
658 }
659 
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 				  const struct hclge_comm_stats_str strs[],
662 				  int size, u8 *data)
663 {
664 	char *buff = (char *)data;
665 	u32 i;
666 
667 	if (stringset != ETH_SS_STATS)
668 		return buff;
669 
670 	for (i = 0; i < size; i++) {
671 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 		buff = buff + ETH_GSTRING_LEN;
673 	}
674 
675 	return (u8 *)buff;
676 }
677 
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 	struct hnae3_handle *handle;
681 	int status;
682 
683 	handle = &hdev->vport[0].nic;
684 	if (handle->client) {
685 		status = hclge_tqps_update_stats(handle);
686 		if (status) {
687 			dev_err(&hdev->pdev->dev,
688 				"Update TQPS stats fail, status = %d.\n",
689 				status);
690 		}
691 	}
692 
693 	status = hclge_mac_update_stats(hdev);
694 	if (status)
695 		dev_err(&hdev->pdev->dev,
696 			"Update MAC stats fail, status = %d.\n", status);
697 }
698 
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 			       struct net_device_stats *net_stats)
701 {
702 	struct hclge_vport *vport = hclge_get_vport(handle);
703 	struct hclge_dev *hdev = vport->back;
704 	int status;
705 
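	/* skip if another statistics update is already in progress */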
706 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 		return;
708 
709 	status = hclge_mac_update_stats(hdev);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update MAC stats fail, status = %d.\n",
713 			status);
714 
715 	status = hclge_tqps_update_stats(handle);
716 	if (status)
717 		dev_err(&hdev->pdev->dev,
718 			"Update TQPS stats fail, status = %d.\n",
719 			status);
720 
721 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723 
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 		HNAE3_SUPPORT_PHY_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730 
731 	struct hclge_vport *vport = hclge_get_vport(handle);
732 	struct hclge_dev *hdev = vport->back;
733 	int count = 0;
734 
735 	/* Loopback test support rules:
736 	 * mac: only GE mode supports it
737 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
738 	 * phy: only supported when a phy device exists on the board
739 	 */
740 	if (stringset == ETH_SS_TEST) {
741 		/* clear loopback bit flags at first */
742 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 			count += 1;
748 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 		}
750 
751 		count += 2;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754 
755 		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
756 		    hdev->hw.mac.phydev->drv->set_loopback) {
757 			count += 1;
758 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759 		}
760 
761 	} else if (stringset == ETH_SS_STATS) {
762 		count = ARRAY_SIZE(g_mac_stats_string) +
763 			hclge_tqps_get_sset_count(handle, stringset);
764 	}
765 
766 	return count;
767 }
768 
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770 			      u8 *data)
771 {
772 	u8 *p = (char *)data;
773 	int size;
774 
775 	if (stringset == ETH_SS_STATS) {
776 		size = ARRAY_SIZE(g_mac_stats_string);
777 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778 					   size, p);
779 		p = hclge_tqps_get_strings(handle, p);
780 	} else if (stringset == ETH_SS_TEST) {
781 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783 			       ETH_GSTRING_LEN);
784 			p += ETH_GSTRING_LEN;
785 		}
786 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788 			       ETH_GSTRING_LEN);
789 			p += ETH_GSTRING_LEN;
790 		}
791 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792 			memcpy(p,
793 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794 			       ETH_GSTRING_LEN);
795 			p += ETH_GSTRING_LEN;
796 		}
797 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799 			       ETH_GSTRING_LEN);
800 			p += ETH_GSTRING_LEN;
801 		}
802 	}
803 }
804 
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807 	struct hclge_vport *vport = hclge_get_vport(handle);
808 	struct hclge_dev *hdev = vport->back;
809 	u64 *p;
810 
811 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812 				 ARRAY_SIZE(g_mac_stats_string), data);
813 	p = hclge_tqps_get_stats(handle, p);
814 }
815 
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817 			       struct hns3_mac_stats *mac_stats)
818 {
819 	struct hclge_vport *vport = hclge_get_vport(handle);
820 	struct hclge_dev *hdev = vport->back;
821 
822 	hclge_update_stats(handle, NULL);
823 
824 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827 
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829 				   struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK	0xF
832 
833 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834 		return -EINVAL;
835 
836 	/* Set the pf to main pf */
837 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
838 		hdev->flag |= HCLGE_FLAG_MAIN;
839 	else
840 		hdev->flag &= ~HCLGE_FLAG_MAIN;
841 
842 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843 	return 0;
844 }
845 
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT	5
849 
850 	struct hclge_func_status_cmd *req;
851 	struct hclge_desc desc;
852 	int timeout = 0;
853 	int ret;
854 
855 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856 	req = (struct hclge_func_status_cmd *)desc.data;
857 
858 	do {
859 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 		if (ret) {
861 			dev_err(&hdev->pdev->dev,
862 				"query function status failed %d.\n", ret);
863 			return ret;
864 		}
865 
866 		/* Check pf reset is done */
867 		if (req->pf_state)
868 			break;
869 		usleep_range(1000, 2000);
870 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
871 
872 	return hclge_parse_func_status(hdev, req);
873 }
874 
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877 	struct hclge_pf_res_cmd *req;
878 	struct hclge_desc desc;
879 	int ret;
880 
881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883 	if (ret) {
884 		dev_err(&hdev->pdev->dev,
885 			"query pf resource failed %d.\n", ret);
886 		return ret;
887 	}
888 
889 	req = (struct hclge_pf_res_cmd *)desc.data;
890 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
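	/* buffer sizes are reported in units of (1 << HCLGE_BUF_UNIT_S) bytes */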
891 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 
893 	if (req->tx_buf_size)
894 		hdev->tx_buf_size =
895 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898 
899 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (req->dv_buf_size)
902 		hdev->dv_buf_size =
903 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 	else
905 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906 
907 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908 
909 	if (hnae3_dev_roce_supported(hdev)) {
910 		hdev->roce_base_msix_offset =
911 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
912 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
913 		hdev->num_roce_msi =
914 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
915 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
916 
917 		/* the nic's msix number always equals the roce's. */
918 		hdev->num_nic_msi = hdev->num_roce_msi;
919 
920 		/* PF should have NIC vectors and Roce vectors,
921 		 * NIC vectors are queued before Roce vectors.
922 		 */
923 		hdev->num_msi = hdev->num_roce_msi +
924 				hdev->roce_base_msix_offset;
925 	} else {
926 		hdev->num_msi =
927 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
928 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
929 
930 		hdev->num_nic_msi = hdev->num_msi;
931 	}
932 
933 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
934 		dev_err(&hdev->pdev->dev,
935 			"Just %u msi resources, not enough for pf(min:2).\n",
936 			hdev->num_nic_msi);
937 		return -EINVAL;
938 	}
939 
940 	return 0;
941 }
942 
943 static int hclge_parse_speed(int speed_cmd, int *speed)
944 {
945 	switch (speed_cmd) {
946 	case 6:
947 		*speed = HCLGE_MAC_SPEED_10M;
948 		break;
949 	case 7:
950 		*speed = HCLGE_MAC_SPEED_100M;
951 		break;
952 	case 0:
953 		*speed = HCLGE_MAC_SPEED_1G;
954 		break;
955 	case 1:
956 		*speed = HCLGE_MAC_SPEED_10G;
957 		break;
958 	case 2:
959 		*speed = HCLGE_MAC_SPEED_25G;
960 		break;
961 	case 3:
962 		*speed = HCLGE_MAC_SPEED_40G;
963 		break;
964 	case 4:
965 		*speed = HCLGE_MAC_SPEED_50G;
966 		break;
967 	case 5:
968 		*speed = HCLGE_MAC_SPEED_100G;
969 		break;
970 	case 8:
971 		*speed = HCLGE_MAC_SPEED_200G;
972 		break;
973 	default:
974 		return -EINVAL;
975 	}
976 
977 	return 0;
978 }
979 
980 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981 {
982 	struct hclge_vport *vport = hclge_get_vport(handle);
983 	struct hclge_dev *hdev = vport->back;
984 	u32 speed_ability = hdev->hw.mac.speed_ability;
985 	u32 speed_bit = 0;
986 
987 	switch (speed) {
988 	case HCLGE_MAC_SPEED_10M:
989 		speed_bit = HCLGE_SUPPORT_10M_BIT;
990 		break;
991 	case HCLGE_MAC_SPEED_100M:
992 		speed_bit = HCLGE_SUPPORT_100M_BIT;
993 		break;
994 	case HCLGE_MAC_SPEED_1G:
995 		speed_bit = HCLGE_SUPPORT_1G_BIT;
996 		break;
997 	case HCLGE_MAC_SPEED_10G:
998 		speed_bit = HCLGE_SUPPORT_10G_BIT;
999 		break;
1000 	case HCLGE_MAC_SPEED_25G:
1001 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1002 		break;
1003 	case HCLGE_MAC_SPEED_40G:
1004 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_50G:
1007 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_100G:
1010 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_200G:
1013 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1014 		break;
1015 	default:
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (speed_bit & speed_ability)
1020 		return 0;
1021 
1022 	return -EINVAL;
1023 }
1024 
1025 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1026 {
1027 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1028 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1029 				 mac->supported);
1030 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1031 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032 				 mac->supported);
1033 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1034 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035 				 mac->supported);
1036 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1037 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038 				 mac->supported);
1039 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041 				 mac->supported);
1042 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1044 				 mac->supported);
1045 }
1046 
1047 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1048 {
1049 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1050 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051 				 mac->supported);
1052 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1053 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1054 				 mac->supported);
1055 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1056 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057 				 mac->supported);
1058 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060 				 mac->supported);
1061 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065 		linkmode_set_bit(
1066 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1067 			mac->supported);
1068 }
1069 
1070 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1071 {
1072 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1073 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074 				 mac->supported);
1075 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1076 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077 				 mac->supported);
1078 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1079 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080 				 mac->supported);
1081 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1082 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083 				 mac->supported);
1084 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1085 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086 				 mac->supported);
1087 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1089 				 mac->supported);
1090 }
1091 
1092 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1093 {
1094 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1095 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096 				 mac->supported);
1097 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1098 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099 				 mac->supported);
1100 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1101 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102 				 mac->supported);
1103 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1104 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105 				 mac->supported);
1106 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1107 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108 				 mac->supported);
1109 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1114 				 mac->supported);
1115 }
1116 
1117 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118 {
1119 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1120 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121 
1122 	switch (mac->speed) {
1123 	case HCLGE_MAC_SPEED_10G:
1124 	case HCLGE_MAC_SPEED_40G:
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1126 				 mac->supported);
1127 		mac->fec_ability =
1128 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129 		break;
1130 	case HCLGE_MAC_SPEED_25G:
1131 	case HCLGE_MAC_SPEED_50G:
1132 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1133 				 mac->supported);
1134 		mac->fec_ability =
1135 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1136 			BIT(HNAE3_FEC_AUTO);
1137 		break;
1138 	case HCLGE_MAC_SPEED_100G:
1139 	case HCLGE_MAC_SPEED_200G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1141 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1142 		break;
1143 	default:
1144 		mac->fec_ability = 0;
1145 		break;
1146 	}
1147 }
1148 
1149 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1150 					u16 speed_ability)
1151 {
1152 	struct hclge_mac *mac = &hdev->hw.mac;
1153 
1154 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1156 				 mac->supported);
1157 
1158 	hclge_convert_setting_sr(mac, speed_ability);
1159 	hclge_convert_setting_lr(mac, speed_ability);
1160 	hclge_convert_setting_cr(mac, speed_ability);
1161 	if (hnae3_dev_fec_supported(hdev))
1162 		hclge_convert_setting_fec(mac);
1163 
1164 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1165 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1167 }
1168 
1169 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1170 					    u16 speed_ability)
1171 {
1172 	struct hclge_mac *mac = &hdev->hw.mac;
1173 
1174 	hclge_convert_setting_kr(mac, speed_ability);
1175 	if (hnae3_dev_fec_supported(hdev))
1176 		hclge_convert_setting_fec(mac);
1177 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1178 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1179 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1180 }
1181 
1182 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1183 					 u16 speed_ability)
1184 {
1185 	unsigned long *supported = hdev->hw.mac.supported;
1186 
1187 	/* default to supporting all speeds for a GE port */
1188 	if (!speed_ability)
1189 		speed_ability = HCLGE_SUPPORT_GE;
1190 
1191 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1192 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1193 				 supported);
1194 
1195 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197 				 supported);
1198 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1199 				 supported);
1200 	}
1201 
1202 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1203 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1204 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1205 	}
1206 
1207 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1210 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1211 }
1212 
1213 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1214 {
1215 	u8 media_type = hdev->hw.mac.media_type;
1216 
1217 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1218 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1219 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1220 		hclge_parse_copper_link_mode(hdev, speed_ability);
1221 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1222 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1223 }
1224 
1225 static u32 hclge_get_max_speed(u16 speed_ability)
1226 {
1227 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1228 		return HCLGE_MAC_SPEED_200G;
1229 
1230 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1231 		return HCLGE_MAC_SPEED_100G;
1232 
1233 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1234 		return HCLGE_MAC_SPEED_50G;
1235 
1236 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1237 		return HCLGE_MAC_SPEED_40G;
1238 
1239 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1240 		return HCLGE_MAC_SPEED_25G;
1241 
1242 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1243 		return HCLGE_MAC_SPEED_10G;
1244 
1245 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1246 		return HCLGE_MAC_SPEED_1G;
1247 
1248 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1249 		return HCLGE_MAC_SPEED_100M;
1250 
1251 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1252 		return HCLGE_MAC_SPEED_10M;
1253 
1254 	return HCLGE_MAC_SPEED_1G;
1255 }
1256 
1257 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1258 {
1259 #define SPEED_ABILITY_EXT_SHIFT			8
1260 
1261 	struct hclge_cfg_param_cmd *req;
1262 	u64 mac_addr_tmp_high;
1263 	u16 speed_ability_ext;
1264 	u64 mac_addr_tmp;
1265 	unsigned int i;
1266 
1267 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1268 
1269 	/* get the configuration */
1270 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1271 					      HCLGE_CFG_VMDQ_M,
1272 					      HCLGE_CFG_VMDQ_S);
1273 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1275 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1276 					    HCLGE_CFG_TQP_DESC_N_M,
1277 					    HCLGE_CFG_TQP_DESC_N_S);
1278 
1279 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 					HCLGE_CFG_PHY_ADDR_M,
1281 					HCLGE_CFG_PHY_ADDR_S);
1282 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 					  HCLGE_CFG_MEDIA_TP_M,
1284 					  HCLGE_CFG_MEDIA_TP_S);
1285 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1286 					  HCLGE_CFG_RX_BUF_LEN_M,
1287 					  HCLGE_CFG_RX_BUF_LEN_S);
1288 	/* get mac_address */
1289 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1290 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1291 					    HCLGE_CFG_MAC_ADDR_H_M,
1292 					    HCLGE_CFG_MAC_ADDR_H_S);
1293 
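	/* the high 16 bits of the MAC address occupy bits 32 and above */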
1294 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1295 
1296 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1297 					     HCLGE_CFG_DEFAULT_SPEED_M,
1298 					     HCLGE_CFG_DEFAULT_SPEED_S);
1299 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1300 					    HCLGE_CFG_RSS_SIZE_M,
1301 					    HCLGE_CFG_RSS_SIZE_S);
1302 
1303 	for (i = 0; i < ETH_ALEN; i++)
1304 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1305 
1306 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1307 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1308 
1309 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1310 					     HCLGE_CFG_SPEED_ABILITY_M,
1311 					     HCLGE_CFG_SPEED_ABILITY_S);
1312 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1314 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
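	/* the extended speed ability bits occupy the high byte */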
1315 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316 
1317 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1318 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1319 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1320 	if (!cfg->umv_space)
1321 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1322 }
1323 
1324 /* hclge_get_cfg: query the static parameters from flash
1325  * @hdev: pointer to struct hclge_dev
1326  * @hcfg: the config structure to be filled
1327  */
1328 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1329 {
1330 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1331 	struct hclge_cfg_param_cmd *req;
1332 	unsigned int i;
1333 	int ret;
1334 
1335 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1336 		u32 offset = 0;
1337 
1338 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1339 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1340 					   true);
1341 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1342 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1343 		/* Len is in units of 4 bytes when sent to hardware */
1344 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1345 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1346 		req->offset = cpu_to_le32(offset);
1347 	}
1348 
1349 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1350 	if (ret) {
1351 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1352 		return ret;
1353 	}
1354 
1355 	hclge_parse_cfg(hcfg, desc);
1356 
1357 	return 0;
1358 }
1359 
1360 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361 {
1362 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1363 
1364 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365 
1366 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1367 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1368 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1369 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1370 }
1371 
1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373 				  struct hclge_desc *desc)
1374 {
1375 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376 	struct hclge_dev_specs_0_cmd *req0;
1377 
1378 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1379 
1380 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1381 	ae_dev->dev_specs.rss_ind_tbl_size =
1382 		le16_to_cpu(req0->rss_ind_tbl_size);
1383 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1384 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1385 }
1386 
1387 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1388 {
1389 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1390 
1391 	if (!dev_specs->max_non_tso_bd_num)
1392 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1393 	if (!dev_specs->rss_ind_tbl_size)
1394 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1395 	if (!dev_specs->rss_key_size)
1396 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1397 	if (!dev_specs->max_tm_rate)
1398 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1399 }
1400 
1401 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1402 {
1403 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1404 	int ret;
1405 	int i;
1406 
1407 	/* set default specifications as devices lower than version V3 do not
1408 	 * support querying specifications from firmware.
1409 	 */
1410 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1411 		hclge_set_default_dev_specs(hdev);
1412 		return 0;
1413 	}
1414 
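	/* chain the descriptors: all but the last one set the NEXT flag */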
1415 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1416 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1417 					   true);
1418 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1419 	}
1420 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1421 
1422 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1423 	if (ret)
1424 		return ret;
1425 
1426 	hclge_parse_dev_specs(hdev, desc);
1427 	hclge_check_dev_specs(hdev);
1428 
1429 	return 0;
1430 }
1431 
1432 static int hclge_get_cap(struct hclge_dev *hdev)
1433 {
1434 	int ret;
1435 
1436 	ret = hclge_query_function_status(hdev);
1437 	if (ret) {
1438 		dev_err(&hdev->pdev->dev,
1439 			"query function status error %d.\n", ret);
1440 		return ret;
1441 	}
1442 
1443 	/* get pf resource */
1444 	return hclge_query_pf_resource(hdev);
1445 }
1446 
1447 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1448 {
1449 #define HCLGE_MIN_TX_DESC	64
1450 #define HCLGE_MIN_RX_DESC	64
1451 
1452 	if (!is_kdump_kernel())
1453 		return;
1454 
1455 	dev_info(&hdev->pdev->dev,
1456 		 "Running kdump kernel. Using minimal resources\n");
1457 
1458 	/* minimal queue pairs equal the number of vports */
1459 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1460 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1461 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1462 }
1463 
1464 static int hclge_configure(struct hclge_dev *hdev)
1465 {
1466 	const struct cpumask *cpumask = cpu_online_mask;
1467 	struct hclge_cfg cfg;
1468 	unsigned int i;
1469 	int node, ret;
1470 
1471 	ret = hclge_get_cfg(hdev, &cfg);
1472 	if (ret)
1473 		return ret;
1474 
1475 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1476 	hdev->base_tqp_pid = 0;
1477 	hdev->rss_size_max = cfg.rss_size_max;
1478 	hdev->rx_buf_len = cfg.rx_buf_len;
1479 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1480 	hdev->hw.mac.media_type = cfg.media_type;
1481 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1482 	hdev->num_tx_desc = cfg.tqp_desc_num;
1483 	hdev->num_rx_desc = cfg.tqp_desc_num;
1484 	hdev->tm_info.num_pg = 1;
1485 	hdev->tc_max = cfg.tc_num;
1486 	hdev->tm_info.hw_pfc_map = 0;
1487 	hdev->wanted_umv_size = cfg.umv_space;
1488 
1489 	if (hnae3_dev_fd_supported(hdev)) {
1490 		hdev->fd_en = true;
1491 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1492 	}
1493 
1494 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1495 	if (ret) {
1496 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1497 			cfg.default_speed, ret);
1498 		return ret;
1499 	}
1500 
1501 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1502 
1503 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1504 
1505 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1506 	    (hdev->tc_max < 1)) {
1507 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1508 			 hdev->tc_max);
1509 		hdev->tc_max = 1;
1510 	}
1511 
1512 	/* Dev does not support DCB */
1513 	if (!hnae3_dev_dcb_supported(hdev)) {
1514 		hdev->tc_max = 1;
1515 		hdev->pfc_max = 0;
1516 	} else {
1517 		hdev->pfc_max = hdev->tc_max;
1518 	}
1519 
1520 	hdev->tm_info.num_tc = 1;
1521 
1522 	/* Currently non-contiguous tc is not supported */
1523 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1524 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1525 
1526 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1527 
1528 	hclge_init_kdump_kernel_config(hdev);
1529 
1530 	/* Set the affinity based on numa node */
1531 	node = dev_to_node(&hdev->pdev->dev);
1532 	if (node != NUMA_NO_NODE)
1533 		cpumask = cpumask_of_node(node);
1534 
1535 	cpumask_copy(&hdev->affinity_mask, cpumask);
1536 
1537 	return ret;
1538 }
1539 
1540 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1541 			    u16 tso_mss_max)
1542 {
1543 	struct hclge_cfg_tso_status_cmd *req;
1544 	struct hclge_desc desc;
1545 
1546 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1547 
1548 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1549 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1550 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1551 
1552 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1553 }
1554 
1555 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1556 {
1557 	struct hclge_cfg_gro_status_cmd *req;
1558 	struct hclge_desc desc;
1559 	int ret;
1560 
1561 	if (!hnae3_dev_gro_supported(hdev))
1562 		return 0;
1563 
1564 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1565 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1566 
1567 	req->gro_en = en ? 1 : 0;
1568 
1569 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1570 	if (ret)
1571 		dev_err(&hdev->pdev->dev,
1572 			"GRO hardware config cmd failed, ret = %d\n", ret);
1573 
1574 	return ret;
1575 }
1576 
1577 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1578 {
1579 	struct hclge_tqp *tqp;
1580 	int i;
1581 
1582 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1583 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1584 	if (!hdev->htqp)
1585 		return -ENOMEM;
1586 
1587 	tqp = hdev->htqp;
1588 
1589 	for (i = 0; i < hdev->num_tqps; i++) {
1590 		tqp->dev = &hdev->pdev->dev;
1591 		tqp->index = i;
1592 
1593 		tqp->q.ae_algo = &ae_algo;
1594 		tqp->q.buf_size = hdev->rx_buf_len;
1595 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1596 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1597 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1598 			i * HCLGE_TQP_REG_SIZE;
1599 
1600 		tqp++;
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1607 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1608 {
1609 	struct hclge_tqp_map_cmd *req;
1610 	struct hclge_desc desc;
1611 	int ret;
1612 
1613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1614 
1615 	req = (struct hclge_tqp_map_cmd *)desc.data;
1616 	req->tqp_id = cpu_to_le16(tqp_pid);
1617 	req->tqp_vf = func_id;
1618 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1619 	if (!is_pf)
1620 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1621 	req->tqp_vid = cpu_to_le16(tqp_vid);
1622 
1623 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1624 	if (ret)
1625 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1626 
1627 	return ret;
1628 }
1629 
1630 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1631 {
1632 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1633 	struct hclge_dev *hdev = vport->back;
1634 	int i, alloced;
1635 
1636 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1637 	     alloced < num_tqps; i++) {
1638 		if (!hdev->htqp[i].alloced) {
1639 			hdev->htqp[i].q.handle = &vport->nic;
1640 			hdev->htqp[i].q.tqp_index = alloced;
1641 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1642 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1643 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1644 			hdev->htqp[i].alloced = true;
1645 			alloced++;
1646 		}
1647 	}
1648 	vport->alloc_tqps = alloced;
1649 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1650 				vport->alloc_tqps / hdev->tm_info.num_tc);
1651 
1652 	/* ensure one-to-one mapping between irq and queue by default */
1653 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1654 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1655 
1656 	return 0;
1657 }
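/* Illustrative example (hypothetical numbers, not from the original source):
 * rss_size is capped both by the hardware limit and by the one-irq-per-queue
 * rule above. With rss_size_max = 16, alloc_tqps = 8, num_tc = 1 and
 * num_nic_msi = 10, rss_size = min(16, 8 / 1, (10 - 1) / 1) = 8.
 */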
1658 
1659 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1660 			    u16 num_tx_desc, u16 num_rx_desc)
1661 
1662 {
1663 	struct hnae3_handle *nic = &vport->nic;
1664 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1665 	struct hclge_dev *hdev = vport->back;
1666 	int ret;
1667 
1668 	kinfo->num_tx_desc = num_tx_desc;
1669 	kinfo->num_rx_desc = num_rx_desc;
1670 
1671 	kinfo->rx_buf_len = hdev->rx_buf_len;
1672 
1673 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1674 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1675 	if (!kinfo->tqp)
1676 		return -ENOMEM;
1677 
1678 	ret = hclge_assign_tqp(vport, num_tqps);
1679 	if (ret)
1680 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1681 
1682 	return ret;
1683 }
1684 
1685 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1686 				  struct hclge_vport *vport)
1687 {
1688 	struct hnae3_handle *nic = &vport->nic;
1689 	struct hnae3_knic_private_info *kinfo;
1690 	u16 i;
1691 
1692 	kinfo = &nic->kinfo;
1693 	for (i = 0; i < vport->alloc_tqps; i++) {
1694 		struct hclge_tqp *q =
1695 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1696 		bool is_pf;
1697 		int ret;
1698 
1699 		is_pf = !(vport->vport_id);
1700 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1701 					     i, is_pf);
1702 		if (ret)
1703 			return ret;
1704 	}
1705 
1706 	return 0;
1707 }
1708 
1709 static int hclge_map_tqp(struct hclge_dev *hdev)
1710 {
1711 	struct hclge_vport *vport = hdev->vport;
1712 	u16 i, num_vport;
1713 
1714 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1715 	for (i = 0; i < num_vport; i++)	{
1716 		int ret;
1717 
1718 		ret = hclge_map_tqp_to_vport(hdev, vport);
1719 		if (ret)
1720 			return ret;
1721 
1722 		vport++;
1723 	}
1724 
1725 	return 0;
1726 }
1727 
1728 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1729 {
1730 	struct hnae3_handle *nic = &vport->nic;
1731 	struct hclge_dev *hdev = vport->back;
1732 	int ret;
1733 
1734 	nic->pdev = hdev->pdev;
1735 	nic->ae_algo = &ae_algo;
1736 	nic->numa_node_mask = hdev->numa_node_mask;
1737 
1738 	ret = hclge_knic_setup(vport, num_tqps,
1739 			       hdev->num_tx_desc, hdev->num_rx_desc);
1740 	if (ret)
1741 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1742 
1743 	return ret;
1744 }
1745 
1746 static int hclge_alloc_vport(struct hclge_dev *hdev)
1747 {
1748 	struct pci_dev *pdev = hdev->pdev;
1749 	struct hclge_vport *vport;
1750 	u32 tqp_main_vport;
1751 	u32 tqp_per_vport;
1752 	int num_vport, i;
1753 	int ret;
1754 
1755 	/* We need to alloc a vport for main NIC of PF */
1756 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1757 
1758 	if (hdev->num_tqps < num_vport) {
1759 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1760 			hdev->num_tqps, num_vport);
1761 		return -EINVAL;
1762 	}
1763 
1764 	/* Alloc the same number of TQPs for every vport */
1765 	tqp_per_vport = hdev->num_tqps / num_vport;
1766 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1767 
1768 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1769 			     GFP_KERNEL);
1770 	if (!vport)
1771 		return -ENOMEM;
1772 
1773 	hdev->vport = vport;
1774 	hdev->num_alloc_vport = num_vport;
1775 
1776 	if (IS_ENABLED(CONFIG_PCI_IOV))
1777 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1778 
1779 	for (i = 0; i < num_vport; i++) {
1780 		vport->back = hdev;
1781 		vport->vport_id = i;
1782 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1783 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1784 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1785 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1786 		INIT_LIST_HEAD(&vport->vlan_list);
1787 		INIT_LIST_HEAD(&vport->uc_mac_list);
1788 		INIT_LIST_HEAD(&vport->mc_mac_list);
1789 		spin_lock_init(&vport->mac_list_lock);
1790 
1791 		if (i == 0)
1792 			ret = hclge_vport_setup(vport, tqp_main_vport);
1793 		else
1794 			ret = hclge_vport_setup(vport, tqp_per_vport);
1795 		if (ret) {
1796 			dev_err(&pdev->dev,
1797 				"vport setup failed for vport %d, %d\n",
1798 				i, ret);
1799 			return ret;
1800 		}
1801 
1802 		vport++;
1803 	}
1804 
1805 	return 0;
1806 }
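/* Illustrative example (hypothetical numbers, not from the original source):
 * TQPs are split evenly across vports and the remainder goes to the main
 * (PF) vport. With num_tqps = 64 and num_vport = 9 (1 PF + 8 VFs),
 * tqp_per_vport = 64 / 9 = 7 and tqp_main_vport = 7 + 64 % 9 = 8.
 */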
1807 
1808 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1809 				    struct hclge_pkt_buf_alloc *buf_alloc)
1810 {
1811 /* TX buffer size is in units of 128 bytes */
1812 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1813 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1814 	struct hclge_tx_buff_alloc_cmd *req;
1815 	struct hclge_desc desc;
1816 	int ret;
1817 	u8 i;
1818 
1819 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1820 
1821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1822 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1823 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1824 
1825 		req->tx_pkt_buff[i] =
1826 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1827 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1828 	}
1829 
1830 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1831 	if (ret)
1832 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1833 			ret);
1834 
1835 	return ret;
1836 }
1837 
1838 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1839 				 struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1842 
1843 	if (ret)
1844 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1845 
1846 	return ret;
1847 }
1848 
1849 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1850 {
1851 	unsigned int i;
1852 	u32 cnt = 0;
1853 
1854 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1855 		if (hdev->hw_tc_map & BIT(i))
1856 			cnt++;
1857 	return cnt;
1858 }
1859 
1860 /* Get the number of pfc enabled TCs that have a private buffer */
1861 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1862 				  struct hclge_pkt_buf_alloc *buf_alloc)
1863 {
1864 	struct hclge_priv_buf *priv;
1865 	unsigned int i;
1866 	int cnt = 0;
1867 
1868 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1869 		priv = &buf_alloc->priv_buf[i];
1870 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1871 		    priv->enable)
1872 			cnt++;
1873 	}
1874 
1875 	return cnt;
1876 }
1877 
1878 /* Get the number of pfc disabled TCs that have a private buffer */
1879 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1880 				     struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882 	struct hclge_priv_buf *priv;
1883 	unsigned int i;
1884 	int cnt = 0;
1885 
1886 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1887 		priv = &buf_alloc->priv_buf[i];
1888 		if (hdev->hw_tc_map & BIT(i) &&
1889 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1890 		    priv->enable)
1891 			cnt++;
1892 	}
1893 
1894 	return cnt;
1895 }
1896 
1897 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1898 {
1899 	struct hclge_priv_buf *priv;
1900 	u32 rx_priv = 0;
1901 	int i;
1902 
1903 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1904 		priv = &buf_alloc->priv_buf[i];
1905 		if (priv->enable)
1906 			rx_priv += priv->buf_size;
1907 	}
1908 	return rx_priv;
1909 }
1910 
1911 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913 	u32 i, total_tx_size = 0;
1914 
1915 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1916 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1917 
1918 	return total_tx_size;
1919 }
1920 
1921 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1922 				struct hclge_pkt_buf_alloc *buf_alloc,
1923 				u32 rx_all)
1924 {
1925 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1926 	u32 tc_num = hclge_get_tc_num(hdev);
1927 	u32 shared_buf, aligned_mps;
1928 	u32 rx_priv;
1929 	int i;
1930 
1931 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1932 
1933 	if (hnae3_dev_dcb_supported(hdev))
1934 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1935 					hdev->dv_buf_size;
1936 	else
1937 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1938 					+ hdev->dv_buf_size;
1939 
1940 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1941 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1942 			     HCLGE_BUF_SIZE_UNIT);
1943 
1944 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1945 	if (rx_all < rx_priv + shared_std)
1946 		return false;
1947 
1948 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1949 	buf_alloc->s_buf.buf_size = shared_buf;
1950 	if (hnae3_dev_dcb_supported(hdev)) {
1951 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1952 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1953 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1954 				  HCLGE_BUF_SIZE_UNIT);
1955 	} else {
1956 		buf_alloc->s_buf.self.high = aligned_mps +
1957 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1958 		buf_alloc->s_buf.self.low = aligned_mps;
1959 	}
1960 
1961 	if (hnae3_dev_dcb_supported(hdev)) {
1962 		hi_thrd = shared_buf - hdev->dv_buf_size;
1963 
1964 		if (tc_num <= NEED_RESERVE_TC_NUM)
1965 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1966 					/ BUF_MAX_PERCENT;
1967 
1968 		if (tc_num)
1969 			hi_thrd = hi_thrd / tc_num;
1970 
1971 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1972 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1973 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1974 	} else {
1975 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1976 		lo_thrd = aligned_mps;
1977 	}
1978 
1979 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1980 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1981 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1982 	}
1983 
1984 	return true;
1985 }
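/* Illustrative sizing (hypothetical numbers, not from the original source):
 * with mps = 1500, aligned_mps = roundup(1500, 256) = 1536. On a DCB-capable
 * device with dv_buf_size = 0x4000, shared_buf_min = 2 * 1536 + 0x4000 =
 * 0x4C00, and the layout is accepted only if rx_all covers the per-TC
 * private buffers plus the rounded-up shared standard size.
 */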
1986 
1987 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1988 				struct hclge_pkt_buf_alloc *buf_alloc)
1989 {
1990 	u32 i, total_size;
1991 
1992 	total_size = hdev->pkt_buf_size;
1993 
1994 	/* alloc tx buffer for all enabled tc */
1995 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1996 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1997 
1998 		if (hdev->hw_tc_map & BIT(i)) {
1999 			if (total_size < hdev->tx_buf_size)
2000 				return -ENOMEM;
2001 
2002 			priv->tx_buf_size = hdev->tx_buf_size;
2003 		} else {
2004 			priv->tx_buf_size = 0;
2005 		}
2006 
2007 		total_size -= priv->tx_buf_size;
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2014 				  struct hclge_pkt_buf_alloc *buf_alloc)
2015 {
2016 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2018 	unsigned int i;
2019 
2020 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2021 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2022 
2023 		priv->enable = 0;
2024 		priv->wl.low = 0;
2025 		priv->wl.high = 0;
2026 		priv->buf_size = 0;
2027 
2028 		if (!(hdev->hw_tc_map & BIT(i)))
2029 			continue;
2030 
2031 		priv->enable = 1;
2032 
2033 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2034 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2035 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2036 						HCLGE_BUF_SIZE_UNIT);
2037 		} else {
2038 			priv->wl.low = 0;
2039 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2040 					aligned_mps;
2041 		}
2042 
2043 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2044 	}
2045 
2046 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2047 }
2048 
2049 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2050 					  struct hclge_pkt_buf_alloc *buf_alloc)
2051 {
2052 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2053 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2054 	int i;
2055 
2056 	/* let the last one be cleared first */
2057 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2058 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2059 		unsigned int mask = BIT((unsigned int)i);
2060 
2061 		if (hdev->hw_tc_map & mask &&
2062 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2063 			/* Clear the no pfc TC private buffer */
2064 			priv->wl.low = 0;
2065 			priv->wl.high = 0;
2066 			priv->buf_size = 0;
2067 			priv->enable = 0;
2068 			no_pfc_priv_num--;
2069 		}
2070 
2071 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2072 		    no_pfc_priv_num == 0)
2073 			break;
2074 	}
2075 
2076 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078 
2079 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2080 					struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2084 	int i;
2085 
2086 	/* let the last one be cleared first */
2087 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 		unsigned int mask = BIT((unsigned int)i);
2090 
2091 		if (hdev->hw_tc_map & mask &&
2092 		    hdev->tm_info.hw_pfc_map & mask) {
2093 			/* Reduce the number of pfc TC with private buffer */
2094 			priv->wl.low = 0;
2095 			priv->enable = 0;
2096 			priv->wl.high = 0;
2097 			priv->buf_size = 0;
2098 			pfc_priv_num--;
2099 		}
2100 
2101 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 		    pfc_priv_num == 0)
2103 			break;
2104 	}
2105 
2106 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108 
2109 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2110 				      struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 #define COMPENSATE_BUFFER	0x3C00
2113 #define COMPENSATE_HALF_MPS_NUM	5
2114 #define PRIV_WL_GAP		0x1800
2115 
2116 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2117 	u32 tc_num = hclge_get_tc_num(hdev);
2118 	u32 half_mps = hdev->mps >> 1;
2119 	u32 min_rx_priv;
2120 	unsigned int i;
2121 
2122 	if (tc_num)
2123 		rx_priv = rx_priv / tc_num;
2124 
2125 	if (tc_num <= NEED_RESERVE_TC_NUM)
2126 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2127 
2128 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2129 			COMPENSATE_HALF_MPS_NUM * half_mps;
2130 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2131 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2132 
2133 	if (rx_priv < min_rx_priv)
2134 		return false;
2135 
2136 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2137 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2138 
2139 		priv->enable = 0;
2140 		priv->wl.low = 0;
2141 		priv->wl.high = 0;
2142 		priv->buf_size = 0;
2143 
2144 		if (!(hdev->hw_tc_map & BIT(i)))
2145 			continue;
2146 
2147 		priv->enable = 1;
2148 		priv->buf_size = rx_priv;
2149 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2150 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2151 	}
2152 
2153 	buf_alloc->s_buf.buf_size = 0;
2154 
2155 	return true;
2156 }
2157 
2158 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2159  * @hdev: pointer to struct hclge_dev
2160  * @buf_alloc: pointer to buffer calculation data
2161  * @return: 0: calculation successful, negative: fail
2162  */
2163 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2164 				struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 	/* When DCB is not supported, rx private buffer is not allocated. */
2167 	if (!hnae3_dev_dcb_supported(hdev)) {
2168 		u32 rx_all = hdev->pkt_buf_size;
2169 
2170 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2171 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2172 			return -ENOMEM;
2173 
2174 		return 0;
2175 	}
2176 
2177 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2178 		return 0;
2179 
2180 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2181 		return 0;
2182 
2183 	/* try to decrease the buffer size */
2184 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2185 		return 0;
2186 
2187 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2188 		return 0;
2189 
2190 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2191 		return 0;
2192 
2193 	return -ENOMEM;
2194 }
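/* Note: the rx buffer calculation above tries progressively smaller layouts:
 * private-buffer-only allocation, full waterlines, reduced waterlines, then
 * dropping private buffers for non-pfc TCs and finally for pfc TCs; -ENOMEM
 * is returned only when none of these fits into the packet buffer.
 */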
2195 
2196 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2197 				   struct hclge_pkt_buf_alloc *buf_alloc)
2198 {
2199 	struct hclge_rx_priv_buff_cmd *req;
2200 	struct hclge_desc desc;
2201 	int ret;
2202 	int i;
2203 
2204 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2205 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2206 
2207 	/* Alloc private buffer TCs */
2208 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2209 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2210 
2211 		req->buf_num[i] =
2212 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2213 		req->buf_num[i] |=
2214 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2215 	}
2216 
2217 	req->shared_buf =
2218 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2219 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2220 
2221 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222 	if (ret)
2223 		dev_err(&hdev->pdev->dev,
2224 			"rx private buffer alloc cmd failed %d\n", ret);
2225 
2226 	return ret;
2227 }
2228 
2229 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2230 				   struct hclge_pkt_buf_alloc *buf_alloc)
2231 {
2232 	struct hclge_rx_priv_wl_buf *req;
2233 	struct hclge_priv_buf *priv;
2234 	struct hclge_desc desc[2];
2235 	int i, j;
2236 	int ret;
2237 
2238 	for (i = 0; i < 2; i++) {
2239 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2240 					   false);
2241 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2242 
2243 		/* The first descriptor set the NEXT bit to 1 */
2244 		if (i == 0)
2245 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2246 		else
2247 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2248 
2249 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2250 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2251 
2252 			priv = &buf_alloc->priv_buf[idx];
2253 			req->tc_wl[j].high =
2254 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2255 			req->tc_wl[j].high |=
2256 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2257 			req->tc_wl[j].low =
2258 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2259 			req->tc_wl[j].low |=
2260 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2261 		}
2262 	}
2263 
2264 	/* Send 2 descriptors at one time */
2265 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2266 	if (ret)
2267 		dev_err(&hdev->pdev->dev,
2268 			"rx private waterline config cmd failed %d\n",
2269 			ret);
2270 	return ret;
2271 }
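/* Note: the waterlines for all HCLGE_MAX_TC_NUM TCs do not fit into a single
 * descriptor, so two descriptors are chained by setting HCLGE_CMD_FLAG_NEXT
 * on the first one and clearing it on the second, then sent together in one
 * hclge_cmd_send() call. hclge_common_thrd_config() below uses the same
 * pattern.
 */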
2272 
2273 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2274 				    struct hclge_pkt_buf_alloc *buf_alloc)
2275 {
2276 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2277 	struct hclge_rx_com_thrd *req;
2278 	struct hclge_desc desc[2];
2279 	struct hclge_tc_thrd *tc;
2280 	int i, j;
2281 	int ret;
2282 
2283 	for (i = 0; i < 2; i++) {
2284 		hclge_cmd_setup_basic_desc(&desc[i],
2285 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2286 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2287 
2288 		/* The first descriptor set the NEXT bit to 1 */
2289 		if (i == 0)
2290 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2291 		else
2292 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2293 
2294 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2295 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2296 
2297 			req->com_thrd[j].high =
2298 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2299 			req->com_thrd[j].high |=
2300 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2301 			req->com_thrd[j].low =
2302 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2303 			req->com_thrd[j].low |=
2304 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2305 		}
2306 	}
2307 
2308 	/* Send 2 descriptors at one time */
2309 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2310 	if (ret)
2311 		dev_err(&hdev->pdev->dev,
2312 			"common threshold config cmd failed %d\n", ret);
2313 	return ret;
2314 }
2315 
2316 static int hclge_common_wl_config(struct hclge_dev *hdev,
2317 				  struct hclge_pkt_buf_alloc *buf_alloc)
2318 {
2319 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2320 	struct hclge_rx_com_wl *req;
2321 	struct hclge_desc desc;
2322 	int ret;
2323 
2324 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2325 
2326 	req = (struct hclge_rx_com_wl *)desc.data;
2327 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2328 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2329 
2330 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2331 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2332 
2333 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2334 	if (ret)
2335 		dev_err(&hdev->pdev->dev,
2336 			"common waterline config cmd failed %d\n", ret);
2337 
2338 	return ret;
2339 }
2340 
2341 int hclge_buffer_alloc(struct hclge_dev *hdev)
2342 {
2343 	struct hclge_pkt_buf_alloc *pkt_buf;
2344 	int ret;
2345 
2346 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2347 	if (!pkt_buf)
2348 		return -ENOMEM;
2349 
2350 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2351 	if (ret) {
2352 		dev_err(&hdev->pdev->dev,
2353 			"could not calc tx buffer size for all TCs %d\n", ret);
2354 		goto out;
2355 	}
2356 
2357 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2358 	if (ret) {
2359 		dev_err(&hdev->pdev->dev,
2360 			"could not alloc tx buffers %d\n", ret);
2361 		goto out;
2362 	}
2363 
2364 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2365 	if (ret) {
2366 		dev_err(&hdev->pdev->dev,
2367 			"could not calc rx priv buffer size for all TCs %d\n",
2368 			ret);
2369 		goto out;
2370 	}
2371 
2372 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2373 	if (ret) {
2374 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2375 			ret);
2376 		goto out;
2377 	}
2378 
2379 	if (hnae3_dev_dcb_supported(hdev)) {
2380 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2381 		if (ret) {
2382 			dev_err(&hdev->pdev->dev,
2383 				"could not configure rx private waterline %d\n",
2384 				ret);
2385 			goto out;
2386 		}
2387 
2388 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2389 		if (ret) {
2390 			dev_err(&hdev->pdev->dev,
2391 				"could not configure common threshold %d\n",
2392 				ret);
2393 			goto out;
2394 		}
2395 	}
2396 
2397 	ret = hclge_common_wl_config(hdev, pkt_buf);
2398 	if (ret)
2399 		dev_err(&hdev->pdev->dev,
2400 			"could not configure common waterline %d\n", ret);
2401 
2402 out:
2403 	kfree(pkt_buf);
2404 	return ret;
2405 }
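/* Note: hclge_buffer_alloc() above is the top-level sequence: calculate and
 * commit the tx buffers, calculate the rx private buffers, program them, and
 * (on DCB-capable devices) configure the per-TC waterlines and common
 * thresholds before the common waterline. The scratch hclge_pkt_buf_alloc is
 * freed in all paths via the out label.
 */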
2406 
2407 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2408 {
2409 	struct hnae3_handle *roce = &vport->roce;
2410 	struct hnae3_handle *nic = &vport->nic;
2411 
2412 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2413 
2414 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2415 	    vport->back->num_msi_left == 0)
2416 		return -EINVAL;
2417 
2418 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2419 
2420 	roce->rinfo.netdev = nic->kinfo.netdev;
2421 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2422 
2423 	roce->pdev = nic->pdev;
2424 	roce->ae_algo = nic->ae_algo;
2425 	roce->numa_node_mask = nic->numa_node_mask;
2426 
2427 	return 0;
2428 }
2429 
2430 static int hclge_init_msi(struct hclge_dev *hdev)
2431 {
2432 	struct pci_dev *pdev = hdev->pdev;
2433 	int vectors;
2434 	int i;
2435 
2436 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2437 					hdev->num_msi,
2438 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2439 	if (vectors < 0) {
2440 		dev_err(&pdev->dev,
2441 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2442 			vectors);
2443 		return vectors;
2444 	}
2445 	if (vectors < hdev->num_msi)
2446 		dev_warn(&hdev->pdev->dev,
2447 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2448 			 hdev->num_msi, vectors);
2449 
2450 	hdev->num_msi = vectors;
2451 	hdev->num_msi_left = vectors;
2452 
2453 	hdev->base_msi_vector = pdev->irq;
2454 	hdev->roce_base_vector = hdev->base_msi_vector +
2455 				hdev->roce_base_msix_offset;
2456 
2457 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2458 					   sizeof(u16), GFP_KERNEL);
2459 	if (!hdev->vector_status) {
2460 		pci_free_irq_vectors(pdev);
2461 		return -ENOMEM;
2462 	}
2463 
2464 	for (i = 0; i < hdev->num_msi; i++)
2465 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2466 
2467 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2468 					sizeof(int), GFP_KERNEL);
2469 	if (!hdev->vector_irq) {
2470 		pci_free_irq_vectors(pdev);
2471 		return -ENOMEM;
2472 	}
2473 
2474 	return 0;
2475 }
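/* Note: pci_alloc_irq_vectors() may grant fewer vectors than num_msi (but at
 * least HNAE3_MIN_VECTOR_NUM); num_msi and num_msi_left are then trimmed to
 * the granted count, and the RoCE base vector is placed at
 * base_msi_vector + roce_base_msix_offset.
 */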
2476 
2477 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2478 {
2479 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2480 		duplex = HCLGE_MAC_FULL;
2481 
2482 	return duplex;
2483 }
2484 
2485 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2486 				      u8 duplex)
2487 {
2488 	struct hclge_config_mac_speed_dup_cmd *req;
2489 	struct hclge_desc desc;
2490 	int ret;
2491 
2492 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2493 
2494 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2495 
2496 	if (duplex)
2497 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2498 
2499 	switch (speed) {
2500 	case HCLGE_MAC_SPEED_10M:
2501 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2502 				HCLGE_CFG_SPEED_S, 6);
2503 		break;
2504 	case HCLGE_MAC_SPEED_100M:
2505 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2506 				HCLGE_CFG_SPEED_S, 7);
2507 		break;
2508 	case HCLGE_MAC_SPEED_1G:
2509 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2510 				HCLGE_CFG_SPEED_S, 0);
2511 		break;
2512 	case HCLGE_MAC_SPEED_10G:
2513 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2514 				HCLGE_CFG_SPEED_S, 1);
2515 		break;
2516 	case HCLGE_MAC_SPEED_25G:
2517 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2518 				HCLGE_CFG_SPEED_S, 2);
2519 		break;
2520 	case HCLGE_MAC_SPEED_40G:
2521 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2522 				HCLGE_CFG_SPEED_S, 3);
2523 		break;
2524 	case HCLGE_MAC_SPEED_50G:
2525 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526 				HCLGE_CFG_SPEED_S, 4);
2527 		break;
2528 	case HCLGE_MAC_SPEED_100G:
2529 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530 				HCLGE_CFG_SPEED_S, 5);
2531 		break;
2532 	case HCLGE_MAC_SPEED_200G:
2533 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534 				HCLGE_CFG_SPEED_S, 8);
2535 		break;
2536 	default:
2537 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2538 		return -EINVAL;
2539 	}
2540 
2541 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2542 		      1);
2543 
2544 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2545 	if (ret) {
2546 		dev_err(&hdev->pdev->dev,
2547 			"mac speed/duplex config cmd failed %d.\n", ret);
2548 		return ret;
2549 	}
2550 
2551 	return 0;
2552 }
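/* Note: the HCLGE_CFG_SPEED_S field uses firmware-defined indexes rather
 * than Mbps values: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6,
 * 100M=7 and 200G=8, as encoded in the switch above.
 */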
2553 
2554 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2555 {
2556 	struct hclge_mac *mac = &hdev->hw.mac;
2557 	int ret;
2558 
2559 	duplex = hclge_check_speed_dup(duplex, speed);
2560 	if (!mac->support_autoneg && mac->speed == speed &&
2561 	    mac->duplex == duplex)
2562 		return 0;
2563 
2564 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2565 	if (ret)
2566 		return ret;
2567 
2568 	hdev->hw.mac.speed = speed;
2569 	hdev->hw.mac.duplex = duplex;
2570 
2571 	return 0;
2572 }
2573 
2574 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2575 				     u8 duplex)
2576 {
2577 	struct hclge_vport *vport = hclge_get_vport(handle);
2578 	struct hclge_dev *hdev = vport->back;
2579 
2580 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2581 }
2582 
2583 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2584 {
2585 	struct hclge_config_auto_neg_cmd *req;
2586 	struct hclge_desc desc;
2587 	u32 flag = 0;
2588 	int ret;
2589 
2590 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2591 
2592 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2593 	if (enable)
2594 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2595 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2596 
2597 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2598 	if (ret)
2599 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2600 			ret);
2601 
2602 	return ret;
2603 }
2604 
2605 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2606 {
2607 	struct hclge_vport *vport = hclge_get_vport(handle);
2608 	struct hclge_dev *hdev = vport->back;
2609 
2610 	if (!hdev->hw.mac.support_autoneg) {
2611 		if (enable) {
2612 			dev_err(&hdev->pdev->dev,
2613 				"autoneg is not supported by current port\n");
2614 			return -EOPNOTSUPP;
2615 		} else {
2616 			return 0;
2617 		}
2618 	}
2619 
2620 	return hclge_set_autoneg_en(hdev, enable);
2621 }
2622 
2623 static int hclge_get_autoneg(struct hnae3_handle *handle)
2624 {
2625 	struct hclge_vport *vport = hclge_get_vport(handle);
2626 	struct hclge_dev *hdev = vport->back;
2627 	struct phy_device *phydev = hdev->hw.mac.phydev;
2628 
2629 	if (phydev)
2630 		return phydev->autoneg;
2631 
2632 	return hdev->hw.mac.autoneg;
2633 }
2634 
2635 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2636 {
2637 	struct hclge_vport *vport = hclge_get_vport(handle);
2638 	struct hclge_dev *hdev = vport->back;
2639 	int ret;
2640 
2641 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2642 
2643 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2644 	if (ret)
2645 		return ret;
2646 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2647 }
2648 
2649 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2650 {
2651 	struct hclge_vport *vport = hclge_get_vport(handle);
2652 	struct hclge_dev *hdev = vport->back;
2653 
2654 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2655 		return hclge_set_autoneg_en(hdev, !halt);
2656 
2657 	return 0;
2658 }
2659 
2660 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2661 {
2662 	struct hclge_config_fec_cmd *req;
2663 	struct hclge_desc desc;
2664 	int ret;
2665 
2666 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2667 
2668 	req = (struct hclge_config_fec_cmd *)desc.data;
2669 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2670 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2671 	if (fec_mode & BIT(HNAE3_FEC_RS))
2672 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2673 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2674 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2675 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2676 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2677 
2678 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2679 	if (ret)
2680 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2681 
2682 	return ret;
2683 }
2684 
2685 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2686 {
2687 	struct hclge_vport *vport = hclge_get_vport(handle);
2688 	struct hclge_dev *hdev = vport->back;
2689 	struct hclge_mac *mac = &hdev->hw.mac;
2690 	int ret;
2691 
2692 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2693 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2694 		return -EINVAL;
2695 	}
2696 
2697 	ret = hclge_set_fec_hw(hdev, fec_mode);
2698 	if (ret)
2699 		return ret;
2700 
2701 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2702 	return 0;
2703 }
2704 
2705 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2706 			  u8 *fec_mode)
2707 {
2708 	struct hclge_vport *vport = hclge_get_vport(handle);
2709 	struct hclge_dev *hdev = vport->back;
2710 	struct hclge_mac *mac = &hdev->hw.mac;
2711 
2712 	if (fec_ability)
2713 		*fec_ability = mac->fec_ability;
2714 	if (fec_mode)
2715 		*fec_mode = mac->fec_mode;
2716 }
2717 
2718 static int hclge_mac_init(struct hclge_dev *hdev)
2719 {
2720 	struct hclge_mac *mac = &hdev->hw.mac;
2721 	int ret;
2722 
2723 	hdev->support_sfp_query = true;
2724 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2725 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2726 					 hdev->hw.mac.duplex);
2727 	if (ret)
2728 		return ret;
2729 
2730 	if (hdev->hw.mac.support_autoneg) {
2731 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2732 		if (ret)
2733 			return ret;
2734 	}
2735 
2736 	mac->link = 0;
2737 
2738 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2739 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2740 		if (ret)
2741 			return ret;
2742 	}
2743 
2744 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2745 	if (ret) {
2746 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2747 		return ret;
2748 	}
2749 
2750 	ret = hclge_set_default_loopback(hdev);
2751 	if (ret)
2752 		return ret;
2753 
2754 	ret = hclge_buffer_alloc(hdev);
2755 	if (ret)
2756 		dev_err(&hdev->pdev->dev,
2757 			"allocate buffer fail, ret=%d\n", ret);
2758 
2759 	return ret;
2760 }
2761 
2762 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2763 {
2764 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2765 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2766 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2767 				    hclge_wq, &hdev->service_task, 0);
2768 }
2769 
2770 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2771 {
2772 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2773 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2774 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2775 				    hclge_wq, &hdev->service_task, 0);
2776 }
2777 
2778 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2779 {
2780 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2781 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2782 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2783 				    hclge_wq, &hdev->service_task,
2784 				    delay_time);
2785 }
2786 
2787 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2788 {
2789 	struct hclge_link_status_cmd *req;
2790 	struct hclge_desc desc;
2791 	int ret;
2792 
2793 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2794 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2795 	if (ret) {
2796 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2797 			ret);
2798 		return ret;
2799 	}
2800 
2801 	req = (struct hclge_link_status_cmd *)desc.data;
2802 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2803 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2804 
2805 	return 0;
2806 }
2807 
2808 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2809 {
2810 	struct phy_device *phydev = hdev->hw.mac.phydev;
2811 
2812 	*link_status = HCLGE_LINK_STATUS_DOWN;
2813 
2814 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2815 		return 0;
2816 
2817 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2818 		return 0;
2819 
2820 	return hclge_get_mac_link_status(hdev, link_status);
2821 }
2822 
2823 static void hclge_update_link_status(struct hclge_dev *hdev)
2824 {
2825 	struct hnae3_client *rclient = hdev->roce_client;
2826 	struct hnae3_client *client = hdev->nic_client;
2827 	struct hnae3_handle *rhandle;
2828 	struct hnae3_handle *handle;
2829 	int state;
2830 	int ret;
2831 	int i;
2832 
2833 	if (!client)
2834 		return;
2835 
2836 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2837 		return;
2838 
2839 	ret = hclge_get_mac_phy_link(hdev, &state);
2840 	if (ret) {
2841 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2842 		return;
2843 	}
2844 
2845 	if (state != hdev->hw.mac.link) {
2846 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2847 			handle = &hdev->vport[i].nic;
2848 			client->ops->link_status_change(handle, state);
2849 			hclge_config_mac_tnl_int(hdev, state);
2850 			rhandle = &hdev->vport[i].roce;
2851 			if (rclient && rclient->ops->link_status_change)
2852 				rclient->ops->link_status_change(rhandle,
2853 								 state);
2854 		}
2855 		hdev->hw.mac.link = state;
2856 	}
2857 
2858 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2859 }
2860 
2861 static void hclge_update_port_capability(struct hclge_mac *mac)
2862 {
2863 	/* update fec ability by speed */
2864 	hclge_convert_setting_fec(mac);
2865 
2866 	/* firmware cannot identify the backplane type, so the media type
2867 	 * read from the configuration is used to handle it
2868 	 */
2869 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2870 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2871 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2872 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2874 
2875 	if (mac->support_autoneg) {
2876 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2877 		linkmode_copy(mac->advertising, mac->supported);
2878 	} else {
2879 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2880 				   mac->supported);
2881 		linkmode_zero(mac->advertising);
2882 	}
2883 }
2884 
2885 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2886 {
2887 	struct hclge_sfp_info_cmd *resp;
2888 	struct hclge_desc desc;
2889 	int ret;
2890 
2891 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2892 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2893 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2894 	if (ret == -EOPNOTSUPP) {
2895 		dev_warn(&hdev->pdev->dev,
2896 			 "IMP do not support get SFP speed %d\n", ret);
2897 		return ret;
2898 	} else if (ret) {
2899 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2900 		return ret;
2901 	}
2902 
2903 	*speed = le32_to_cpu(resp->speed);
2904 
2905 	return 0;
2906 }
2907 
2908 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2909 {
2910 	struct hclge_sfp_info_cmd *resp;
2911 	struct hclge_desc desc;
2912 	int ret;
2913 
2914 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2915 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2916 
2917 	resp->query_type = QUERY_ACTIVE_SPEED;
2918 
2919 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2920 	if (ret == -EOPNOTSUPP) {
2921 		dev_warn(&hdev->pdev->dev,
2922 			 "IMP does not support get SFP info %d\n", ret);
2923 		return ret;
2924 	} else if (ret) {
2925 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2926 		return ret;
2927 	}
2928 
2929 	/* In some cases the mac speed read from the IMP may be 0; it should
2930 	 * not be written to mac->speed.
2931 	 */
2932 	if (!le32_to_cpu(resp->speed))
2933 		return 0;
2934 
2935 	mac->speed = le32_to_cpu(resp->speed);
2936 	/* if resp->speed_ability is 0, the firmware is an old version,
2937 	 * so do not update these params
2938 	 */
2939 	if (resp->speed_ability) {
2940 		mac->module_type = le32_to_cpu(resp->module_type);
2941 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2942 		mac->autoneg = resp->autoneg;
2943 		mac->support_autoneg = resp->autoneg_ability;
2944 		mac->speed_type = QUERY_ACTIVE_SPEED;
2945 		if (!resp->active_fec)
2946 			mac->fec_mode = 0;
2947 		else
2948 			mac->fec_mode = BIT(resp->active_fec);
2949 	} else {
2950 		mac->speed_type = QUERY_SFP_SPEED;
2951 	}
2952 
2953 	return 0;
2954 }
2955 
2956 static int hclge_update_port_info(struct hclge_dev *hdev)
2957 {
2958 	struct hclge_mac *mac = &hdev->hw.mac;
2959 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2960 	int ret;
2961 
2962 	/* get the port info from SFP cmd if not copper port */
2963 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2964 		return 0;
2965 
2966 	/* if IMP does not support get SFP/qSFP info, return directly */
2967 	if (!hdev->support_sfp_query)
2968 		return 0;
2969 
2970 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2971 		ret = hclge_get_sfp_info(hdev, mac);
2972 	else
2973 		ret = hclge_get_sfp_speed(hdev, &speed);
2974 
2975 	if (ret == -EOPNOTSUPP) {
2976 		hdev->support_sfp_query = false;
2977 		return ret;
2978 	} else if (ret) {
2979 		return ret;
2980 	}
2981 
2982 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2983 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2984 			hclge_update_port_capability(mac);
2985 			return 0;
2986 		}
2987 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2988 					       HCLGE_MAC_FULL);
2989 	} else {
2990 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2991 			return 0; /* do nothing if no SFP */
2992 
2993 		/* must config full duplex for SFP */
2994 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2995 	}
2996 }
2997 
2998 static int hclge_get_status(struct hnae3_handle *handle)
2999 {
3000 	struct hclge_vport *vport = hclge_get_vport(handle);
3001 	struct hclge_dev *hdev = vport->back;
3002 
3003 	hclge_update_link_status(hdev);
3004 
3005 	return hdev->hw.mac.link;
3006 }
3007 
3008 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3009 {
3010 	if (!pci_num_vf(hdev->pdev)) {
3011 		dev_err(&hdev->pdev->dev,
3012 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3013 		return NULL;
3014 	}
3015 
3016 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3017 		dev_err(&hdev->pdev->dev,
3018 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3019 			vf, pci_num_vf(hdev->pdev));
3020 		return NULL;
3021 	}
3022 
3023 	/* VFs start from 1 in vport */
3024 	vf += HCLGE_VF_VPORT_START_NUM;
3025 	return &hdev->vport[vf];
3026 }
3027 
3028 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3029 			       struct ifla_vf_info *ivf)
3030 {
3031 	struct hclge_vport *vport = hclge_get_vport(handle);
3032 	struct hclge_dev *hdev = vport->back;
3033 
3034 	vport = hclge_get_vf_vport(hdev, vf);
3035 	if (!vport)
3036 		return -EINVAL;
3037 
3038 	ivf->vf = vf;
3039 	ivf->linkstate = vport->vf_info.link_state;
3040 	ivf->spoofchk = vport->vf_info.spoofchk;
3041 	ivf->trusted = vport->vf_info.trusted;
3042 	ivf->min_tx_rate = 0;
3043 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3044 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3045 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3046 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3047 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3048 
3049 	return 0;
3050 }
3051 
3052 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3053 				   int link_state)
3054 {
3055 	struct hclge_vport *vport = hclge_get_vport(handle);
3056 	struct hclge_dev *hdev = vport->back;
3057 
3058 	vport = hclge_get_vf_vport(hdev, vf);
3059 	if (!vport)
3060 		return -EINVAL;
3061 
3062 	vport->vf_info.link_state = link_state;
3063 
3064 	return 0;
3065 }
3066 
3067 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3068 {
3069 	u32 cmdq_src_reg, msix_src_reg;
3070 
3071 	/* fetch the events from their corresponding regs */
3072 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3073 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3074 
3075 	/* Assumption: if reset and mailbox events are reported together,
3076 	 * only the reset event is processed in this pass and the handling
3077 	 * of the mailbox events is deferred. Since the RX CMDQ event is not
3078 	 * cleared this time, the H/W will raise another interrupt just for
3079 	 * the mailbox.
3080 	 *
3081 	 * check for vector0 reset event sources
3082 	 */
3083 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3084 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3085 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3086 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3087 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3088 		hdev->rst_stats.imp_rst_cnt++;
3089 		return HCLGE_VECTOR0_EVENT_RST;
3090 	}
3091 
3092 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3093 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3094 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3095 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3096 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3097 		hdev->rst_stats.global_rst_cnt++;
3098 		return HCLGE_VECTOR0_EVENT_RST;
3099 	}
3100 
3101 	/* check for vector0 msix event source */
3102 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3103 		*clearval = msix_src_reg;
3104 		return HCLGE_VECTOR0_EVENT_ERR;
3105 	}
3106 
3107 	/* check for vector0 mailbox(=CMDQ RX) event source */
3108 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3109 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3110 		*clearval = cmdq_src_reg;
3111 		return HCLGE_VECTOR0_EVENT_MBX;
3112 	}
3113 
3114 	/* print other vector0 event source */
3115 	dev_info(&hdev->pdev->dev,
3116 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3117 		 cmdq_src_reg, msix_src_reg);
3118 	*clearval = msix_src_reg;
3119 
3120 	return HCLGE_VECTOR0_EVENT_OTHER;
3121 }
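/* Note: when several vector0 sources fire at once, the checks above give
 * IMP reset the highest priority, followed by global reset, MSI-X errors
 * and finally the mailbox (CMDQ RX) event; anything else is logged and
 * reported as HCLGE_VECTOR0_EVENT_OTHER.
 */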
3122 
3123 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3124 				    u32 regclr)
3125 {
3126 	switch (event_type) {
3127 	case HCLGE_VECTOR0_EVENT_RST:
3128 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3129 		break;
3130 	case HCLGE_VECTOR0_EVENT_MBX:
3131 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3132 		break;
3133 	default:
3134 		break;
3135 	}
3136 }
3137 
3138 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3139 {
3140 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3141 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3142 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3143 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3144 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3145 }
3146 
3147 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3148 {
3149 	writel(enable ? 1 : 0, vector->addr);
3150 }
3151 
3152 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3153 {
3154 	struct hclge_dev *hdev = data;
3155 	u32 clearval = 0;
3156 	u32 event_cause;
3157 
3158 	hclge_enable_vector(&hdev->misc_vector, false);
3159 	event_cause = hclge_check_event_cause(hdev, &clearval);
3160 
3161 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3162 	switch (event_cause) {
3163 	case HCLGE_VECTOR0_EVENT_ERR:
3164 		/* We do not know what type of reset is required yet. This can
3165 		 * only be decided after we fetch the type of errors which
3166 		 * caused this event. Therefore, for now:
3167 		 * 1. Assert HNAE3_UNKNOWN_RESET, which means the type of reset
3168 		 *    to be used is deferred.
3169 		 * 2. Schedule the reset service task.
3170 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3171 		 *    fetches the correct type of reset by first decoding the
3172 		 *    types of errors.
3173 		 */
3174 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3175 		fallthrough;
3176 	case HCLGE_VECTOR0_EVENT_RST:
3177 		hclge_reset_task_schedule(hdev);
3178 		break;
3179 	case HCLGE_VECTOR0_EVENT_MBX:
3180 		/* If we are here then either:
3181 		 * 1. we are not handling any mbx task and we are not
3182 		 *    scheduled as well,
3183 		 *                        or
3184 		 * 2. we could be handling an mbx task but nothing more is
3185 		 *    scheduled.
3186 		 * In both cases, schedule the mbx task as there are more
3187 		 * mbx messages reported by this interrupt.
3188 		 */
3189 		hclge_mbx_task_schedule(hdev);
3190 		break;
3191 	default:
3192 		dev_warn(&hdev->pdev->dev,
3193 			 "received unknown or unhandled event of vector0\n");
3194 		break;
3195 	}
3196 
3197 	hclge_clear_event_cause(hdev, event_cause, clearval);
3198 
3199 	/* Enable the interrupt if it is not caused by reset. When clearval
3200 	 * equals 0, it means the interrupt status may have been cleared by
3201 	 * hardware before the driver read the status register; in this case
3202 	 * the vector0 interrupt should also be enabled.
3203 	 */
3204 	if (!clearval ||
3205 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3206 		hclge_enable_vector(&hdev->misc_vector, true);
3207 	}
3208 
3209 	return IRQ_HANDLED;
3210 }
3211 
3212 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3213 {
3214 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3215 		dev_warn(&hdev->pdev->dev,
3216 			 "vector(vector_id %d) has been freed.\n", vector_id);
3217 		return;
3218 	}
3219 
3220 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3221 	hdev->num_msi_left += 1;
3222 	hdev->num_msi_used -= 1;
3223 }
3224 
3225 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3226 {
3227 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3228 
3229 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3230 
3231 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3232 	hdev->vector_status[0] = 0;
3233 
3234 	hdev->num_msi_left -= 1;
3235 	hdev->num_msi_used += 1;
3236 }
3237 
3238 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3239 				      const cpumask_t *mask)
3240 {
3241 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3242 					      affinity_notify);
3243 
3244 	cpumask_copy(&hdev->affinity_mask, mask);
3245 }
3246 
3247 static void hclge_irq_affinity_release(struct kref *ref)
3248 {
3249 }
3250 
3251 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3252 {
3253 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3254 			      &hdev->affinity_mask);
3255 
3256 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3257 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3258 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3259 				  &hdev->affinity_notify);
3260 }
3261 
3262 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3263 {
3264 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3265 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3266 }
3267 
3268 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3269 {
3270 	int ret;
3271 
3272 	hclge_get_misc_vector(hdev);
3273 
3274 	/* this would be explicitly freed in the end */
3275 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3276 		 HCLGE_NAME, pci_name(hdev->pdev));
3277 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3278 			  0, hdev->misc_vector.name, hdev);
3279 	if (ret) {
3280 		hclge_free_vector(hdev, 0);
3281 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3282 			hdev->misc_vector.vector_irq);
3283 	}
3284 
3285 	return ret;
3286 }
3287 
3288 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3289 {
3290 	free_irq(hdev->misc_vector.vector_irq, hdev);
3291 	hclge_free_vector(hdev, 0);
3292 }
3293 
3294 int hclge_notify_client(struct hclge_dev *hdev,
3295 			enum hnae3_reset_notify_type type)
3296 {
3297 	struct hnae3_client *client = hdev->nic_client;
3298 	u16 i;
3299 
3300 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3301 		return 0;
3302 
3303 	if (!client->ops->reset_notify)
3304 		return -EOPNOTSUPP;
3305 
3306 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3307 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3308 		int ret;
3309 
3310 		ret = client->ops->reset_notify(handle, type);
3311 		if (ret) {
3312 			dev_err(&hdev->pdev->dev,
3313 				"notify nic client failed %d(%d)\n", type, ret);
3314 			return ret;
3315 		}
3316 	}
3317 
3318 	return 0;
3319 }
3320 
3321 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3322 				    enum hnae3_reset_notify_type type)
3323 {
3324 	struct hnae3_client *client = hdev->roce_client;
3325 	int ret;
3326 	u16 i;
3327 
3328 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3329 		return 0;
3330 
3331 	if (!client->ops->reset_notify)
3332 		return -EOPNOTSUPP;
3333 
3334 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3335 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3336 
3337 		ret = client->ops->reset_notify(handle, type);
3338 		if (ret) {
3339 			dev_err(&hdev->pdev->dev,
3340 				"notify roce client failed %d(%d)",
3341 				type, ret);
3342 			return ret;
3343 		}
3344 	}
3345 
3346 	return ret;
3347 }
3348 
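/* Poll the hardware reset status register for the current reset type
 * until the corresponding busy bit clears. With HCLGE_RESET_WATI_MS
 * (100 ms) per iteration and HCLGE_RESET_WAIT_CNT (350) iterations,
 * the wait is bounded at roughly 35 seconds before returning -EBUSY.
 */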
3349 static int hclge_reset_wait(struct hclge_dev *hdev)
3350 {
3351 #define HCLGE_RESET_WATI_MS	100
3352 #define HCLGE_RESET_WAIT_CNT	350
3353 
3354 	u32 val, reg, reg_bit;
3355 	u32 cnt = 0;
3356 
3357 	switch (hdev->reset_type) {
3358 	case HNAE3_IMP_RESET:
3359 		reg = HCLGE_GLOBAL_RESET_REG;
3360 		reg_bit = HCLGE_IMP_RESET_BIT;
3361 		break;
3362 	case HNAE3_GLOBAL_RESET:
3363 		reg = HCLGE_GLOBAL_RESET_REG;
3364 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3365 		break;
3366 	case HNAE3_FUNC_RESET:
3367 		reg = HCLGE_FUN_RST_ING;
3368 		reg_bit = HCLGE_FUN_RST_ING_B;
3369 		break;
3370 	default:
3371 		dev_err(&hdev->pdev->dev,
3372 			"Wait for unsupported reset type: %d\n",
3373 			hdev->reset_type);
3374 		return -EINVAL;
3375 	}
3376 
3377 	val = hclge_read_dev(&hdev->hw, reg);
3378 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3379 		msleep(HCLGE_RESET_WATI_MS);
3380 		val = hclge_read_dev(&hdev->hw, reg);
3381 		cnt++;
3382 	}
3383 
3384 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3385 		dev_warn(&hdev->pdev->dev,
3386 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3387 		return -EBUSY;
3388 	}
3389 
3390 	return 0;
3391 }
3392 
3393 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3394 {
3395 	struct hclge_vf_rst_cmd *req;
3396 	struct hclge_desc desc;
3397 
3398 	req = (struct hclge_vf_rst_cmd *)desc.data;
3399 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3400 	req->dest_vfid = func_id;
3401 
3402 	if (reset)
3403 		req->vf_rst = 0x1;
3404 
3405 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3406 }
3407 
3408 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3409 {
3410 	int i;
3411 
3412 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3413 		struct hclge_vport *vport = &hdev->vport[i];
3414 		int ret;
3415 
3416 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3417 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3418 		if (ret) {
3419 			dev_err(&hdev->pdev->dev,
3420 				"set vf(%u) rst failed %d!\n",
3421 				vport->vport_id, ret);
3422 			return ret;
3423 		}
3424 
3425 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3426 			continue;
3427 
3428 		/* Inform VF to process the reset.
3429 		 * hclge_inform_reset_assert_to_vf may fail if VF
3430 		 * driver is not loaded.
3431 		 */
3432 		ret = hclge_inform_reset_assert_to_vf(vport);
3433 		if (ret)
3434 			dev_warn(&hdev->pdev->dev,
3435 				 "inform reset to vf(%u) failed %d!\n",
3436 				 vport->vport_id, ret);
3437 	}
3438 
3439 	return 0;
3440 }
3441 
3442 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3443 {
3444 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3445 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3446 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3447 		return;
3448 
3449 	hclge_mbx_handler(hdev);
3450 
3451 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3452 }
3453 
3454 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3455 {
3456 	struct hclge_pf_rst_sync_cmd *req;
3457 	struct hclge_desc desc;
3458 	int cnt = 0;
3459 	int ret;
3460 
3461 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3462 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3463 
3464 	do {
3465 		/* vf needs to down the netdev by mbx during PF or FLR reset */
3466 		hclge_mailbox_service_task(hdev);
3467 
3468 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3469 		/* to be compatible with old firmware, wait
3470 		 * 100 ms for the VF to stop IO
3471 		 */
3472 		if (ret == -EOPNOTSUPP) {
3473 			msleep(HCLGE_RESET_SYNC_TIME);
3474 			return;
3475 		} else if (ret) {
3476 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3477 				 ret);
3478 			return;
3479 		} else if (req->all_vf_ready) {
3480 			return;
3481 		}
3482 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3483 		hclge_cmd_reuse_desc(&desc, true);
3484 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3485 
3486 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3487 }
3488 
3489 void hclge_report_hw_error(struct hclge_dev *hdev,
3490 			   enum hnae3_hw_error_type type)
3491 {
3492 	struct hnae3_client *client = hdev->nic_client;
3493 	u16 i;
3494 
3495 	if (!client || !client->ops->process_hw_error ||
3496 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3497 		return;
3498 
3499 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3500 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3501 }
3502 
3503 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3504 {
3505 	u32 reg_val;
3506 
3507 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3508 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3509 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3510 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3511 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3512 	}
3513 
3514 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3515 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3516 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3517 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3518 	}
3519 }
3520 
3521 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3522 {
3523 	struct hclge_desc desc;
3524 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3525 	int ret;
3526 
3527 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3528 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3529 	req->fun_reset_vfid = func_id;
3530 
3531 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3532 	if (ret)
3533 		dev_err(&hdev->pdev->dev,
3534 			"send function reset cmd fail, status =%d\n", ret);
3535 
3536 	return ret;
3537 }
3538 
3539 static void hclge_do_reset(struct hclge_dev *hdev)
3540 {
3541 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3542 	struct pci_dev *pdev = hdev->pdev;
3543 	u32 val;
3544 
3545 	if (hclge_get_hw_reset_stat(handle)) {
3546 		dev_info(&pdev->dev, "hardware reset not finish\n");
3547 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3548 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3549 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3550 		return;
3551 	}
3552 
3553 	switch (hdev->reset_type) {
3554 	case HNAE3_GLOBAL_RESET:
3555 		dev_info(&pdev->dev, "global reset requested\n");
3556 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3557 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3558 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3559 		break;
3560 	case HNAE3_FUNC_RESET:
3561 		dev_info(&pdev->dev, "PF reset requested\n");
3562 		/* schedule again to check later */
3563 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3564 		hclge_reset_task_schedule(hdev);
3565 		break;
3566 	default:
3567 		dev_warn(&pdev->dev,
3568 			 "unsupported reset type: %d\n", hdev->reset_type);
3569 		break;
3570 	}
3571 }
3572 
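/* Resolve the pending reset bits in @addr to a single reset level.
 * Any UNKNOWN reset is first converted to a concrete type via the
 * MSI-X error handling path, then the highest priority level is
 * returned and cleared: IMP > GLOBAL > FUNC > FLR. A level lower than
 * the reset currently being handled is reported as NONE.
 */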
3573 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3574 						   unsigned long *addr)
3575 {
3576 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3577 	struct hclge_dev *hdev = ae_dev->priv;
3578 
3579 	/* first, resolve any unknown reset type to the known type(s) */
3580 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3581 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3582 					HCLGE_MISC_VECTOR_INT_STS);
3583 		/* we will intentionally ignore any errors from this function
3584 		 * as we will end up in *some* reset request in any case
3585 		 */
3586 		if (hclge_handle_hw_msix_error(hdev, addr))
3587 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3588 				 msix_sts_reg);
3589 
3590 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3591 		/* We deferred the clearing of the error event which caused
3592 		 * the interrupt, since it was not possible to do that in
3593 		 * interrupt context (and this is the reason we introduced
3594 		 * the new UNKNOWN reset type). Now that the errors have been
3595 		 * handled and cleared in hardware, we can safely enable
3596 		 * interrupts. This is an exception to the norm.
3597 		 */
3598 		hclge_enable_vector(&hdev->misc_vector, true);
3599 	}
3600 
3601 	/* return the highest priority reset level amongst all */
3602 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3603 		rst_level = HNAE3_IMP_RESET;
3604 		clear_bit(HNAE3_IMP_RESET, addr);
3605 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3606 		clear_bit(HNAE3_FUNC_RESET, addr);
3607 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3608 		rst_level = HNAE3_GLOBAL_RESET;
3609 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3610 		clear_bit(HNAE3_FUNC_RESET, addr);
3611 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3612 		rst_level = HNAE3_FUNC_RESET;
3613 		clear_bit(HNAE3_FUNC_RESET, addr);
3614 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3615 		rst_level = HNAE3_FLR_RESET;
3616 		clear_bit(HNAE3_FLR_RESET, addr);
3617 	}
3618 
3619 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3620 	    rst_level < hdev->reset_type)
3621 		return HNAE3_NONE_RESET;
3622 
3623 	return rst_level;
3624 }
3625 
3626 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3627 {
3628 	u32 clearval = 0;
3629 
3630 	switch (hdev->reset_type) {
3631 	case HNAE3_IMP_RESET:
3632 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3633 		break;
3634 	case HNAE3_GLOBAL_RESET:
3635 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3636 		break;
3637 	default:
3638 		break;
3639 	}
3640 
3641 	if (!clearval)
3642 		return;
3643 
3644 	/* For revision 0x20, the reset interrupt source
3645 	 * can only be cleared after the hardware reset is done
3646 	 */
3647 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3648 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3649 				clearval);
3650 
3651 	hclge_enable_vector(&hdev->misc_vector, true);
3652 }
3653 
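/* Tell the firmware whether the driver is ready for the hardware reset
 * by toggling the HCLGE_NIC_SW_RST_RDY bit in the CSQ depth register.
 */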
3654 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3655 {
3656 	u32 reg_val;
3657 
3658 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3659 	if (enable)
3660 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3661 	else
3662 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3663 
3664 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3665 }
3666 
3667 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3668 {
3669 	int ret;
3670 
3671 	ret = hclge_set_all_vf_rst(hdev, true);
3672 	if (ret)
3673 		return ret;
3674 
3675 	hclge_func_reset_sync_vf(hdev);
3676 
3677 	return 0;
3678 }
3679 
3680 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3681 {
3682 	u32 reg_val;
3683 	int ret = 0;
3684 
3685 	switch (hdev->reset_type) {
3686 	case HNAE3_FUNC_RESET:
3687 		ret = hclge_func_reset_notify_vf(hdev);
3688 		if (ret)
3689 			return ret;
3690 
3691 		ret = hclge_func_reset_cmd(hdev, 0);
3692 		if (ret) {
3693 			dev_err(&hdev->pdev->dev,
3694 				"asserting function reset fail %d!\n", ret);
3695 			return ret;
3696 		}
3697 
3698 		/* After performing PF reset, it is not necessary to do the
3699 		 * mailbox handling or send any command to firmware, because
3700 		 * any mailbox handling or command to firmware is only valid
3701 		 * after hclge_cmd_init is called.
3702 		 */
3703 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3704 		hdev->rst_stats.pf_rst_cnt++;
3705 		break;
3706 	case HNAE3_FLR_RESET:
3707 		ret = hclge_func_reset_notify_vf(hdev);
3708 		if (ret)
3709 			return ret;
3710 		break;
3711 	case HNAE3_IMP_RESET:
3712 		hclge_handle_imp_error(hdev);
3713 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3714 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3715 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3716 		break;
3717 	default:
3718 		break;
3719 	}
3720 
3721 	/* inform hardware that preparatory work is done */
3722 	msleep(HCLGE_RESET_SYNC_TIME);
3723 	hclge_reset_handshake(hdev, true);
3724 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3725 
3726 	return ret;
3727 }
3728 
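/* Decide how to proceed after a failed reset. Returns true when the
 * reset task should be rescheduled (a reset is still pending or the
 * failure count is below MAX_RESET_FAIL_CNT), false when a new reset
 * interrupt supersedes this one or the retry limit has been reached.
 */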
3729 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3730 {
3731 #define MAX_RESET_FAIL_CNT 5
3732 
3733 	if (hdev->reset_pending) {
3734 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3735 			 hdev->reset_pending);
3736 		return true;
3737 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3738 		   HCLGE_RESET_INT_M) {
3739 		dev_info(&hdev->pdev->dev,
3740 			 "reset failed because new reset interrupt\n");
3741 		hclge_clear_reset_cause(hdev);
3742 		return false;
3743 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3744 		hdev->rst_stats.reset_fail_cnt++;
3745 		set_bit(hdev->reset_type, &hdev->reset_pending);
3746 		dev_info(&hdev->pdev->dev,
3747 			 "re-schedule reset task(%u)\n",
3748 			 hdev->rst_stats.reset_fail_cnt);
3749 		return true;
3750 	}
3751 
3752 	hclge_clear_reset_cause(hdev);
3753 
3754 	/* recover the handshake status when reset fails */
3755 	hclge_reset_handshake(hdev, true);
3756 
3757 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3758 
3759 	hclge_dbg_dump_rst_info(hdev);
3760 
3761 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3762 
3763 	return false;
3764 }
3765 
3766 static int hclge_set_rst_done(struct hclge_dev *hdev)
3767 {
3768 	struct hclge_pf_rst_done_cmd *req;
3769 	struct hclge_desc desc;
3770 	int ret;
3771 
3772 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3773 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3774 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3775 
3776 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3777 	/* To be compatible with the old firmware, which does not support
3778 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3779 	 * return success
3780 	 */
3781 	if (ret == -EOPNOTSUPP) {
3782 		dev_warn(&hdev->pdev->dev,
3783 			 "current firmware does not support command(0x%x)!\n",
3784 			 HCLGE_OPC_PF_RST_DONE);
3785 		return 0;
3786 	} else if (ret) {
3787 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3788 			ret);
3789 	}
3790 
3791 	return ret;
3792 }
3793 
3794 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3795 {
3796 	int ret = 0;
3797 
3798 	switch (hdev->reset_type) {
3799 	case HNAE3_FUNC_RESET:
3800 	case HNAE3_FLR_RESET:
3801 		ret = hclge_set_all_vf_rst(hdev, false);
3802 		break;
3803 	case HNAE3_GLOBAL_RESET:
3804 	case HNAE3_IMP_RESET:
3805 		ret = hclge_set_rst_done(hdev);
3806 		break;
3807 	default:
3808 		break;
3809 	}
3810 
3811 	/* clear up the handshake status after re-initialization is done */
3812 	hclge_reset_handshake(hdev, false);
3813 
3814 	return ret;
3815 }
3816 
3817 static int hclge_reset_stack(struct hclge_dev *hdev)
3818 {
3819 	int ret;
3820 
3821 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3822 	if (ret)
3823 		return ret;
3824 
3825 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3826 	if (ret)
3827 		return ret;
3828 
3829 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3830 }
3831 
3832 static int hclge_reset_prepare(struct hclge_dev *hdev)
3833 {
3834 	int ret;
3835 
3836 	hdev->rst_stats.reset_cnt++;
3837 	/* perform reset of the stack & ae device for a client */
3838 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3839 	if (ret)
3840 		return ret;
3841 
3842 	rtnl_lock();
3843 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3844 	rtnl_unlock();
3845 	if (ret)
3846 		return ret;
3847 
3848 	return hclge_reset_prepare_wait(hdev);
3849 }
3850 
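/* Rebuild driver and client state after the hardware has completed the
 * reset: re-init the ae device and the NIC/RoCE clients, clear the
 * reset cause, and bring the clients back up. Any higher level reset
 * recorded in default_reset_request is re-requested at the end.
 */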
3851 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3852 {
3853 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3854 	enum hnae3_reset_type reset_level;
3855 	int ret;
3856 
3857 	hdev->rst_stats.hw_reset_done_cnt++;
3858 
3859 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3860 	if (ret)
3861 		return ret;
3862 
3863 	rtnl_lock();
3864 	ret = hclge_reset_stack(hdev);
3865 	rtnl_unlock();
3866 	if (ret)
3867 		return ret;
3868 
3869 	hclge_clear_reset_cause(hdev);
3870 
3871 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3872 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3873 	 * times
3874 	 */
3875 	if (ret &&
3876 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3877 		return ret;
3878 
3879 	ret = hclge_reset_prepare_up(hdev);
3880 	if (ret)
3881 		return ret;
3882 
3883 	rtnl_lock();
3884 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3885 	rtnl_unlock();
3886 	if (ret)
3887 		return ret;
3888 
3889 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3890 	if (ret)
3891 		return ret;
3892 
3893 	hdev->last_reset_time = jiffies;
3894 	hdev->rst_stats.reset_fail_cnt = 0;
3895 	hdev->rst_stats.reset_done_cnt++;
3896 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3897 
3898 	/* if default_reset_request has a higher level reset request,
3899 	 * it should be handled as soon as possible, since some errors
3900 	 * need this kind of reset to be fixed.
3901 	 */
3902 	reset_level = hclge_get_reset_level(ae_dev,
3903 					    &hdev->default_reset_request);
3904 	if (reset_level != HNAE3_NONE_RESET)
3905 		set_bit(reset_level, &hdev->reset_request);
3906 
3907 	return 0;
3908 }
3909 
3910 static void hclge_reset(struct hclge_dev *hdev)
3911 {
3912 	if (hclge_reset_prepare(hdev))
3913 		goto err_reset;
3914 
3915 	if (hclge_reset_wait(hdev))
3916 		goto err_reset;
3917 
3918 	if (hclge_reset_rebuild(hdev))
3919 		goto err_reset;
3920 
3921 	return;
3922 
3923 err_reset:
3924 	if (hclge_reset_err_handle(hdev))
3925 		hclge_reset_task_schedule(hdev);
3926 }
3927 
3928 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3929 {
3930 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3931 	struct hclge_dev *hdev = ae_dev->priv;
3932 
3933 	/* We might end up getting called broadly because of the 2 cases below:
3934 	 * 1. A recoverable error was conveyed through APEI and the only way
3935 	 *    to bring back normalcy is to reset.
3936 	 * 2. A new reset request from the stack due to timeout
3937 	 *
3938 	 * For the first case, the error event might not have an ae handle
3939 	 * available. Check if this is a new reset request and we are not here
3940 	 * just because the last reset attempt did not succeed and the watchdog
3941 	 * hit us again. We will know this if the last reset request did not
3942 	 * occur very recently (watchdog timer = 5*HZ, so check after a
3943 	 * sufficiently large time, say 4*5*HZ). In case of a new request we
3944 	 * reset the "reset level" to PF reset. And if it is a repeat of the
3945 	 * most recent reset request, we want to make sure we throttle it.
3946 	 * Therefore, we will not allow another reset before 3*HZ has elapsed.
3947 	 */
3948 	if (!handle)
3949 		handle = &hdev->vport[0].nic;
3950 
3951 	if (time_before(jiffies, (hdev->last_reset_time +
3952 				  HCLGE_RESET_INTERVAL))) {
3953 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3954 		return;
3955 	} else if (hdev->default_reset_request) {
3956 		hdev->reset_level =
3957 			hclge_get_reset_level(ae_dev,
3958 					      &hdev->default_reset_request);
3959 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3960 		hdev->reset_level = HNAE3_FUNC_RESET;
3961 	}
3962 
3963 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3964 		 hdev->reset_level);
3965 
3966 	/* request reset & schedule reset task */
3967 	set_bit(hdev->reset_level, &hdev->reset_request);
3968 	hclge_reset_task_schedule(hdev);
3969 
3970 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3971 		hdev->reset_level++;
3972 }
3973 
3974 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3975 					enum hnae3_reset_type rst_type)
3976 {
3977 	struct hclge_dev *hdev = ae_dev->priv;
3978 
3979 	set_bit(rst_type, &hdev->default_reset_request);
3980 }
3981 
3982 static void hclge_reset_timer(struct timer_list *t)
3983 {
3984 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3985 
3986 	/* if default_reset_request has no value, it means that this reset
3987 	 * request has already been handled, so just return here
3988 	 */
3989 	if (!hdev->default_reset_request)
3990 		return;
3991 
3992 	dev_info(&hdev->pdev->dev,
3993 		 "triggering reset in reset timer\n");
3994 	hclge_reset_event(hdev->pdev, NULL);
3995 }
3996 
3997 static void hclge_reset_subtask(struct hclge_dev *hdev)
3998 {
3999 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4000 
4001 	/* check if there is any ongoing reset in the hardware. This status can
4002 	 * be checked from reset_pending. If there is, then we need to wait for
4003 	 * the hardware to complete the reset.
4004 	 *    a. If we are able to figure out in reasonable time that the
4005 	 *       hardware has fully reset, then we can proceed with the driver
4006 	 *       and client reset.
4007 	 *    b. Else, we can come back later to check this status, so
4008 	 *       re-schedule now.
4009 	 */
4010 	hdev->last_reset_time = jiffies;
4011 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4012 	if (hdev->reset_type != HNAE3_NONE_RESET)
4013 		hclge_reset(hdev);
4014 
4015 	/* check if we got any *new* reset requests to be honored */
4016 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4017 	if (hdev->reset_type != HNAE3_NONE_RESET)
4018 		hclge_do_reset(hdev);
4019 
4020 	hdev->reset_type = HNAE3_NONE_RESET;
4021 }
4022 
4023 static void hclge_reset_service_task(struct hclge_dev *hdev)
4024 {
4025 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4026 		return;
4027 
4028 	down(&hdev->reset_sem);
4029 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4030 
4031 	hclge_reset_subtask(hdev);
4032 
4033 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4034 	up(&hdev->reset_sem);
4035 }
4036 
4037 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4038 {
4039 	int i;
4040 
4041 	/* start from vport 1 since the PF is always alive */
4042 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4043 		struct hclge_vport *vport = &hdev->vport[i];
4044 
4045 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4046 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4047 
4048 		/* If vf is not alive, set to default value */
4049 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4050 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4051 	}
4052 }
4053 
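/* Periodic housekeeping: link status, MAC table, promisc mode and VLAN
 * sync, vport alive tracking, statistics and ARFS rule expiry. The
 * heavier work is rate limited to roughly once per second via
 * last_serv_processed.
 */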
4054 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4055 {
4056 	unsigned long delta = round_jiffies_relative(HZ);
4057 
4058 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4059 		return;
4060 
4061 	/* Always handle the link update to make sure the link state is
4062 	 * refreshed when it is triggered by mbx.
4063 	 */
4064 	hclge_update_link_status(hdev);
4065 	hclge_sync_mac_table(hdev);
4066 	hclge_sync_promisc_mode(hdev);
4067 
4068 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4069 		delta = jiffies - hdev->last_serv_processed;
4070 
4071 		if (delta < round_jiffies_relative(HZ)) {
4072 			delta = round_jiffies_relative(HZ) - delta;
4073 			goto out;
4074 		}
4075 	}
4076 
4077 	hdev->serv_processed_cnt++;
4078 	hclge_update_vport_alive(hdev);
4079 
4080 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4081 		hdev->last_serv_processed = jiffies;
4082 		goto out;
4083 	}
4084 
4085 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4086 		hclge_update_stats_for_all(hdev);
4087 
4088 	hclge_update_port_info(hdev);
4089 	hclge_sync_vlan_filter(hdev);
4090 
4091 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4092 		hclge_rfs_filter_expire(hdev);
4093 
4094 	hdev->last_serv_processed = jiffies;
4095 
4096 out:
4097 	hclge_task_schedule(hdev, delta);
4098 }
4099 
4100 static void hclge_service_task(struct work_struct *work)
4101 {
4102 	struct hclge_dev *hdev =
4103 		container_of(work, struct hclge_dev, service_task.work);
4104 
4105 	hclge_reset_service_task(hdev);
4106 	hclge_mailbox_service_task(hdev);
4107 	hclge_periodic_service_task(hdev);
4108 
4109 	/* Handle reset and mbx again in case the periodic task delays the
4110 	 * handling by calling hclge_task_schedule() in
4111 	 * hclge_periodic_service_task().
4112 	 */
4113 	hclge_reset_service_task(hdev);
4114 	hclge_mailbox_service_task(hdev);
4115 }
4116 
4117 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4118 {
4119 	/* VF handle has no client */
4120 	if (!handle->client)
4121 		return container_of(handle, struct hclge_vport, nic);
4122 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4123 		return container_of(handle, struct hclge_vport, roce);
4124 	else
4125 		return container_of(handle, struct hclge_vport, nic);
4126 }
4127 
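/* Allocate up to @vector_num unused MSI-X vectors for the requesting
 * vport and fill @vector_info with the irq number and io address of
 * each. Vector 0 is reserved for the misc interrupt, so scanning
 * starts at index 1. Returns the number of vectors actually allocated.
 */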
4128 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4129 			    struct hnae3_vector_info *vector_info)
4130 {
4131 	struct hclge_vport *vport = hclge_get_vport(handle);
4132 	struct hnae3_vector_info *vector = vector_info;
4133 	struct hclge_dev *hdev = vport->back;
4134 	int alloc = 0;
4135 	int i, j;
4136 
4137 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4138 	vector_num = min(hdev->num_msi_left, vector_num);
4139 
4140 	for (j = 0; j < vector_num; j++) {
4141 		for (i = 1; i < hdev->num_msi; i++) {
4142 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4143 				vector->vector = pci_irq_vector(hdev->pdev, i);
4144 				vector->io_addr = hdev->hw.io_base +
4145 					HCLGE_VECTOR_REG_BASE +
4146 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4147 					vport->vport_id *
4148 					HCLGE_VECTOR_VF_OFFSET;
4149 				hdev->vector_status[i] = vport->vport_id;
4150 				hdev->vector_irq[i] = vector->vector;
4151 
4152 				vector++;
4153 				alloc++;
4154 
4155 				break;
4156 			}
4157 		}
4158 	}
4159 	hdev->num_msi_left -= alloc;
4160 	hdev->num_msi_used += alloc;
4161 
4162 	return alloc;
4163 }
4164 
4165 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4166 {
4167 	int i;
4168 
4169 	for (i = 0; i < hdev->num_msi; i++)
4170 		if (vector == hdev->vector_irq[i])
4171 			return i;
4172 
4173 	return -EINVAL;
4174 }
4175 
4176 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4177 {
4178 	struct hclge_vport *vport = hclge_get_vport(handle);
4179 	struct hclge_dev *hdev = vport->back;
4180 	int vector_id;
4181 
4182 	vector_id = hclge_get_vector_index(hdev, vector);
4183 	if (vector_id < 0) {
4184 		dev_err(&hdev->pdev->dev,
4185 			"Get vector index fail. vector = %d\n", vector);
4186 		return vector_id;
4187 	}
4188 
4189 	hclge_free_vector(hdev, vector_id);
4190 
4191 	return 0;
4192 }
4193 
4194 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4195 {
4196 	return HCLGE_RSS_KEY_SIZE;
4197 }
4198 
4199 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4200 {
4201 	return HCLGE_RSS_IND_TBL_SIZE;
4202 }
4203 
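/* Program the RSS hash algorithm and hash key into hardware. The key
 * is written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command
 * descriptor per chunk.
 */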
4204 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4205 				  const u8 hfunc, const u8 *key)
4206 {
4207 	struct hclge_rss_config_cmd *req;
4208 	unsigned int key_offset = 0;
4209 	struct hclge_desc desc;
4210 	int key_counts;
4211 	int key_size;
4212 	int ret;
4213 
4214 	key_counts = HCLGE_RSS_KEY_SIZE;
4215 	req = (struct hclge_rss_config_cmd *)desc.data;
4216 
4217 	while (key_counts) {
4218 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4219 					   false);
4220 
4221 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4222 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4223 
4224 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4225 		memcpy(req->hash_key,
4226 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4227 
4228 		key_counts -= key_size;
4229 		key_offset++;
4230 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4231 		if (ret) {
4232 			dev_err(&hdev->pdev->dev,
4233 				"Configure RSS config fail, status = %d\n",
4234 				ret);
4235 			return ret;
4236 		}
4237 	}
4238 	return 0;
4239 }
4240 
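/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */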
4241 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4242 {
4243 	struct hclge_rss_indirection_table_cmd *req;
4244 	struct hclge_desc desc;
4245 	int i, j;
4246 	int ret;
4247 
4248 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4249 
4250 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4251 		hclge_cmd_setup_basic_desc
4252 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4253 
4254 		req->start_table_index =
4255 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4256 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4257 
4258 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4259 			req->rss_result[j] =
4260 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4261 
4262 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4263 		if (ret) {
4264 			dev_err(&hdev->pdev->dev,
4265 				"Configure rss indir table fail,status = %d\n",
4266 				ret);
4267 			return ret;
4268 		}
4269 	}
4270 	return 0;
4271 }
4272 
4273 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4274 				 u16 *tc_size, u16 *tc_offset)
4275 {
4276 	struct hclge_rss_tc_mode_cmd *req;
4277 	struct hclge_desc desc;
4278 	int ret;
4279 	int i;
4280 
4281 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4282 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4283 
4284 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4285 		u16 mode = 0;
4286 
4287 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4288 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4289 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4290 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4291 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4292 
4293 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4294 	}
4295 
4296 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4297 	if (ret)
4298 		dev_err(&hdev->pdev->dev,
4299 			"Configure rss tc mode fail, status = %d\n", ret);
4300 
4301 	return ret;
4302 }
4303 
4304 static void hclge_get_rss_type(struct hclge_vport *vport)
4305 {
4306 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4307 	    vport->rss_tuple_sets.ipv4_udp_en ||
4308 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4309 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4310 	    vport->rss_tuple_sets.ipv6_udp_en ||
4311 	    vport->rss_tuple_sets.ipv6_sctp_en)
4312 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4313 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4314 		 vport->rss_tuple_sets.ipv6_fragment_en)
4315 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4316 	else
4317 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4318 }
4319 
4320 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4321 {
4322 	struct hclge_rss_input_tuple_cmd *req;
4323 	struct hclge_desc desc;
4324 	int ret;
4325 
4326 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4327 
4328 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4329 
4330 	/* Get the tuple cfg from pf */
4331 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4332 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4333 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4334 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4335 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4336 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4337 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4338 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4339 	hclge_get_rss_type(&hdev->vport[0]);
4340 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4341 	if (ret)
4342 		dev_err(&hdev->pdev->dev,
4343 			"Configure rss input fail, status = %d\n", ret);
4344 	return ret;
4345 }
4346 
4347 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4348 			 u8 *key, u8 *hfunc)
4349 {
4350 	struct hclge_vport *vport = hclge_get_vport(handle);
4351 	int i;
4352 
4353 	/* Get hash algorithm */
4354 	if (hfunc) {
4355 		switch (vport->rss_algo) {
4356 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4357 			*hfunc = ETH_RSS_HASH_TOP;
4358 			break;
4359 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4360 			*hfunc = ETH_RSS_HASH_XOR;
4361 			break;
4362 		default:
4363 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4364 			break;
4365 		}
4366 	}
4367 
4368 	/* Get the RSS Key required by the user */
4369 	if (key)
4370 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4371 
4372 	/* Get indirect table */
4373 	if (indir)
4374 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4375 			indir[i] = vport->rss_indirection_tbl[i];
4376 
4377 	return 0;
4378 }
4379 
4380 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4381 				 u8 *hash_algo)
4382 {
4383 	switch (hfunc) {
4384 	case ETH_RSS_HASH_TOP:
4385 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4386 		return 0;
4387 	case ETH_RSS_HASH_XOR:
4388 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4389 		return 0;
4390 	case ETH_RSS_HASH_NO_CHANGE:
4391 		*hash_algo = vport->rss_algo;
4392 		return 0;
4393 	default:
4394 		return -EINVAL;
4395 	}
4396 }
4397 
4398 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4399 			 const u8 *key, const u8 hfunc)
4400 {
4401 	struct hclge_vport *vport = hclge_get_vport(handle);
4402 	struct hclge_dev *hdev = vport->back;
4403 	u8 hash_algo;
4404 	int ret, i;
4405 
4406 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4407 	if (ret) {
4408 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4409 		return ret;
4410 	}
4411 
4412 	/* Set the RSS Hash Key if specified by the user */
4413 	if (key) {
4414 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4415 		if (ret)
4416 			return ret;
4417 
4418 		/* Update the shadow RSS key with the user specified key */
4419 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4420 	} else {
4421 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4422 					     vport->rss_hash_key);
4423 		if (ret)
4424 			return ret;
4425 	}
4426 	vport->rss_algo = hash_algo;
4427 
4428 	/* Update the shadow RSS table with user specified qids */
4429 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4430 		vport->rss_indirection_tbl[i] = indir[i];
4431 
4432 	/* Update the hardware */
4433 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4434 }
4435 
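/* Convert the ethtool RXH_* flags in @nfc into the hardware hash tuple
 * bits (source/destination IP and L4 port). SCTP flows additionally
 * set HCLGE_V_TAG_BIT.
 */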
4436 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4437 {
4438 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4439 
4440 	if (nfc->data & RXH_L4_B_2_3)
4441 		hash_sets |= HCLGE_D_PORT_BIT;
4442 	else
4443 		hash_sets &= ~HCLGE_D_PORT_BIT;
4444 
4445 	if (nfc->data & RXH_IP_SRC)
4446 		hash_sets |= HCLGE_S_IP_BIT;
4447 	else
4448 		hash_sets &= ~HCLGE_S_IP_BIT;
4449 
4450 	if (nfc->data & RXH_IP_DST)
4451 		hash_sets |= HCLGE_D_IP_BIT;
4452 	else
4453 		hash_sets &= ~HCLGE_D_IP_BIT;
4454 
4455 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4456 		hash_sets |= HCLGE_V_TAG_BIT;
4457 
4458 	return hash_sets;
4459 }
4460 
4461 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4462 			       struct ethtool_rxnfc *nfc)
4463 {
4464 	struct hclge_vport *vport = hclge_get_vport(handle);
4465 	struct hclge_dev *hdev = vport->back;
4466 	struct hclge_rss_input_tuple_cmd *req;
4467 	struct hclge_desc desc;
4468 	u8 tuple_sets;
4469 	int ret;
4470 
4471 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4472 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4473 		return -EINVAL;
4474 
4475 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4476 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4477 
4478 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4479 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4480 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4481 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4482 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4483 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4484 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4485 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4486 
4487 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4488 	switch (nfc->flow_type) {
4489 	case TCP_V4_FLOW:
4490 		req->ipv4_tcp_en = tuple_sets;
4491 		break;
4492 	case TCP_V6_FLOW:
4493 		req->ipv6_tcp_en = tuple_sets;
4494 		break;
4495 	case UDP_V4_FLOW:
4496 		req->ipv4_udp_en = tuple_sets;
4497 		break;
4498 	case UDP_V6_FLOW:
4499 		req->ipv6_udp_en = tuple_sets;
4500 		break;
4501 	case SCTP_V4_FLOW:
4502 		req->ipv4_sctp_en = tuple_sets;
4503 		break;
4504 	case SCTP_V6_FLOW:
4505 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4506 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4507 			return -EINVAL;
4508 
4509 		req->ipv6_sctp_en = tuple_sets;
4510 		break;
4511 	case IPV4_FLOW:
4512 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4513 		break;
4514 	case IPV6_FLOW:
4515 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4516 		break;
4517 	default:
4518 		return -EINVAL;
4519 	}
4520 
4521 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4522 	if (ret) {
4523 		dev_err(&hdev->pdev->dev,
4524 			"Set rss tuple fail, status = %d\n", ret);
4525 		return ret;
4526 	}
4527 
4528 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4529 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4530 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4531 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4532 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4533 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4534 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4535 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4536 	hclge_get_rss_type(vport);
4537 	return 0;
4538 }
4539 
4540 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4541 			       struct ethtool_rxnfc *nfc)
4542 {
4543 	struct hclge_vport *vport = hclge_get_vport(handle);
4544 	u8 tuple_sets;
4545 
4546 	nfc->data = 0;
4547 
4548 	switch (nfc->flow_type) {
4549 	case TCP_V4_FLOW:
4550 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4551 		break;
4552 	case UDP_V4_FLOW:
4553 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4554 		break;
4555 	case TCP_V6_FLOW:
4556 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4557 		break;
4558 	case UDP_V6_FLOW:
4559 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4560 		break;
4561 	case SCTP_V4_FLOW:
4562 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4563 		break;
4564 	case SCTP_V6_FLOW:
4565 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4566 		break;
4567 	case IPV4_FLOW:
4568 	case IPV6_FLOW:
4569 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4570 		break;
4571 	default:
4572 		return -EINVAL;
4573 	}
4574 
4575 	if (!tuple_sets)
4576 		return 0;
4577 
4578 	if (tuple_sets & HCLGE_D_PORT_BIT)
4579 		nfc->data |= RXH_L4_B_2_3;
4580 	if (tuple_sets & HCLGE_S_PORT_BIT)
4581 		nfc->data |= RXH_L4_B_0_1;
4582 	if (tuple_sets & HCLGE_D_IP_BIT)
4583 		nfc->data |= RXH_IP_DST;
4584 	if (tuple_sets & HCLGE_S_IP_BIT)
4585 		nfc->data |= RXH_IP_SRC;
4586 
4587 	return 0;
4588 }
4589 
4590 static int hclge_get_tc_size(struct hnae3_handle *handle)
4591 {
4592 	struct hclge_vport *vport = hclge_get_vport(handle);
4593 	struct hclge_dev *hdev = vport->back;
4594 
4595 	return hdev->rss_size_max;
4596 }
4597 
4598 int hclge_rss_init_hw(struct hclge_dev *hdev)
4599 {
4600 	struct hclge_vport *vport = hdev->vport;
4601 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4602 	u16 rss_size = vport[0].alloc_rss_size;
4603 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4604 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4605 	u8 *key = vport[0].rss_hash_key;
4606 	u8 hfunc = vport[0].rss_algo;
4607 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4608 	u16 roundup_size;
4609 	unsigned int i;
4610 	int ret;
4611 
4612 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4613 	if (ret)
4614 		return ret;
4615 
4616 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4617 	if (ret)
4618 		return ret;
4619 
4620 	ret = hclge_set_rss_input_tuple(hdev);
4621 	if (ret)
4622 		return ret;
4623 
4624 	/* Each TC has the same queue size, and the tc_size set to hardware is
4625 	 * the log2 of the roundup power of two of rss_size; the actual queue
4626 	 * size is limited by the indirection table.
4627 	 */
4628 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4629 		dev_err(&hdev->pdev->dev,
4630 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4631 			rss_size);
4632 		return -EINVAL;
4633 	}
4634 
4635 	roundup_size = roundup_pow_of_two(rss_size);
4636 	roundup_size = ilog2(roundup_size);
4637 
4638 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4639 		tc_valid[i] = 0;
4640 
4641 		if (!(hdev->hw_tc_map & BIT(i)))
4642 			continue;
4643 
4644 		tc_valid[i] = 1;
4645 		tc_size[i] = roundup_size;
4646 		tc_offset[i] = rss_size * i;
4647 	}
4648 
4649 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4650 }
4651 
4652 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4653 {
4654 	struct hclge_vport *vport = hdev->vport;
4655 	int i, j;
4656 
4657 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4658 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4659 			vport[j].rss_indirection_tbl[i] =
4660 				i % vport[j].alloc_rss_size;
4661 	}
4662 }
4663 
4664 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4665 {
4666 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4667 	struct hclge_vport *vport = hdev->vport;
4668 
4669 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4670 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4671 
4672 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4673 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4674 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4675 		vport[i].rss_tuple_sets.ipv4_udp_en =
4676 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4677 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4678 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4679 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4680 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4681 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4682 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4683 		vport[i].rss_tuple_sets.ipv6_udp_en =
4684 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4685 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4686 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4687 			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4688 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4689 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4690 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4691 
4692 		vport[i].rss_algo = rss_algo;
4693 
4694 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4695 		       HCLGE_RSS_KEY_SIZE);
4696 	}
4697 
4698 	hclge_rss_indir_init_cfg(hdev);
4699 }
4700 
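/* Map (@en true) or unmap (@en false) the rings in @ring_chain to the
 * given interrupt vector. Ring entries are batched into command
 * descriptors holding up to HCLGE_VECTOR_ELEMENTS_PER_CMD entries each.
 */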
4701 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4702 				int vector_id, bool en,
4703 				struct hnae3_ring_chain_node *ring_chain)
4704 {
4705 	struct hclge_dev *hdev = vport->back;
4706 	struct hnae3_ring_chain_node *node;
4707 	struct hclge_desc desc;
4708 	struct hclge_ctrl_vector_chain_cmd *req =
4709 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4710 	enum hclge_cmd_status status;
4711 	enum hclge_opcode_type op;
4712 	u16 tqp_type_and_id;
4713 	int i;
4714 
4715 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4716 	hclge_cmd_setup_basic_desc(&desc, op, false);
4717 	req->int_vector_id = vector_id;
4718 
4719 	i = 0;
4720 	for (node = ring_chain; node; node = node->next) {
4721 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4722 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4723 				HCLGE_INT_TYPE_S,
4724 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4725 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4726 				HCLGE_TQP_ID_S, node->tqp_index);
4727 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4728 				HCLGE_INT_GL_IDX_S,
4729 				hnae3_get_field(node->int_gl_idx,
4730 						HNAE3_RING_GL_IDX_M,
4731 						HNAE3_RING_GL_IDX_S));
4732 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4733 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4734 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4735 			req->vfid = vport->vport_id;
4736 
4737 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4738 			if (status) {
4739 				dev_err(&hdev->pdev->dev,
4740 					"Map TQP fail, status is %d.\n",
4741 					status);
4742 				return -EIO;
4743 			}
4744 			i = 0;
4745 
4746 			hclge_cmd_setup_basic_desc(&desc,
4747 						   op,
4748 						   false);
4749 			req->int_vector_id = vector_id;
4750 		}
4751 	}
4752 
4753 	if (i > 0) {
4754 		req->int_cause_num = i;
4755 		req->vfid = vport->vport_id;
4756 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4757 		if (status) {
4758 			dev_err(&hdev->pdev->dev,
4759 				"Map TQP fail, status is %d.\n", status);
4760 			return -EIO;
4761 		}
4762 	}
4763 
4764 	return 0;
4765 }
4766 
4767 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4768 				    struct hnae3_ring_chain_node *ring_chain)
4769 {
4770 	struct hclge_vport *vport = hclge_get_vport(handle);
4771 	struct hclge_dev *hdev = vport->back;
4772 	int vector_id;
4773 
4774 	vector_id = hclge_get_vector_index(hdev, vector);
4775 	if (vector_id < 0) {
4776 		dev_err(&hdev->pdev->dev,
4777 			"failed to get vector index. vector=%d\n", vector);
4778 		return vector_id;
4779 	}
4780 
4781 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4782 }
4783 
4784 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4785 				       struct hnae3_ring_chain_node *ring_chain)
4786 {
4787 	struct hclge_vport *vport = hclge_get_vport(handle);
4788 	struct hclge_dev *hdev = vport->back;
4789 	int vector_id, ret;
4790 
4791 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4792 		return 0;
4793 
4794 	vector_id = hclge_get_vector_index(hdev, vector);
4795 	if (vector_id < 0) {
4796 		dev_err(&handle->pdev->dev,
4797 			"Get vector index fail. ret =%d\n", vector_id);
4798 		return vector_id;
4799 	}
4800 
4801 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4802 	if (ret)
4803 		dev_err(&handle->pdev->dev,
4804 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4805 			vector_id, ret);
4806 
4807 	return ret;
4808 }
4809 
4810 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4811 				      struct hclge_promisc_param *param)
4812 {
4813 	struct hclge_promisc_cfg_cmd *req;
4814 	struct hclge_desc desc;
4815 	int ret;
4816 
4817 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4818 
4819 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4820 	req->vf_id = param->vf_id;
4821 
4822 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4823 	 * pdev revision(0x20); newer revisions support them. The value of
4824 	 * these two fields will not cause an error when the driver sends the
4825 	 * command to firmware on revision(0x20).
4826 	 */
4827 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4828 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4829 
4830 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4831 	if (ret)
4832 		dev_err(&hdev->pdev->dev,
4833 			"failed to set vport %d promisc mode, ret = %d.\n",
4834 			param->vf_id, ret);
4835 
4836 	return ret;
4837 }
4838 
4839 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4840 				     bool en_uc, bool en_mc, bool en_bc,
4841 				     int vport_id)
4842 {
4843 	if (!param)
4844 		return;
4845 
4846 	memset(param, 0, sizeof(struct hclge_promisc_param));
4847 	if (en_uc)
4848 		param->enable = HCLGE_PROMISC_EN_UC;
4849 	if (en_mc)
4850 		param->enable |= HCLGE_PROMISC_EN_MC;
4851 	if (en_bc)
4852 		param->enable |= HCLGE_PROMISC_EN_BC;
4853 	param->vf_id = vport_id;
4854 }
4855 
4856 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4857 				 bool en_mc_pmc, bool en_bc_pmc)
4858 {
4859 	struct hclge_dev *hdev = vport->back;
4860 	struct hclge_promisc_param param;
4861 
4862 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4863 				 vport->vport_id);
4864 	return hclge_cmd_set_promisc_mode(hdev, &param);
4865 }
4866 
4867 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4868 				  bool en_mc_pmc)
4869 {
4870 	struct hclge_vport *vport = hclge_get_vport(handle);
4871 	struct hclge_dev *hdev = vport->back;
4872 	bool en_bc_pmc = true;
4873 
4874 	/* For devices whose version is below V2, if broadcast promisc is
4875 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
4876 	 * should be disabled until the user enables promisc mode.
4877 	 */
4878 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4879 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4880 
4881 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4882 					    en_bc_pmc);
4883 }
4884 
4885 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4886 {
4887 	struct hclge_vport *vport = hclge_get_vport(handle);
4888 	struct hclge_dev *hdev = vport->back;
4889 
4890 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4891 }
4892 
4893 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4894 {
4895 	struct hclge_get_fd_mode_cmd *req;
4896 	struct hclge_desc desc;
4897 	int ret;
4898 
4899 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4900 
4901 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4902 
4903 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904 	if (ret) {
4905 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4906 		return ret;
4907 	}
4908 
4909 	*fd_mode = req->mode;
4910 
4911 	return ret;
4912 }
4913 
4914 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4915 				   u32 *stage1_entry_num,
4916 				   u32 *stage2_entry_num,
4917 				   u16 *stage1_counter_num,
4918 				   u16 *stage2_counter_num)
4919 {
4920 	struct hclge_get_fd_allocation_cmd *req;
4921 	struct hclge_desc desc;
4922 	int ret;
4923 
4924 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4925 
4926 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4927 
4928 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4929 	if (ret) {
4930 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4931 			ret);
4932 		return ret;
4933 	}
4934 
4935 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4936 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4937 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4938 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4939 
4940 	return ret;
4941 }
4942 
4943 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4944 				   enum HCLGE_FD_STAGE stage_num)
4945 {
4946 	struct hclge_set_fd_key_config_cmd *req;
4947 	struct hclge_fd_key_cfg *stage;
4948 	struct hclge_desc desc;
4949 	int ret;
4950 
4951 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4952 
4953 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4954 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4955 	req->stage = stage_num;
4956 	req->key_select = stage->key_sel;
4957 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4958 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4959 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4960 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4961 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4962 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4963 
4964 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4965 	if (ret)
4966 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4967 
4968 	return ret;
4969 }
4970 
4971 static int hclge_init_fd_config(struct hclge_dev *hdev)
4972 {
4973 #define LOW_2_WORDS		0x03
4974 	struct hclge_fd_key_cfg *key_cfg;
4975 	int ret;
4976 
4977 	if (!hnae3_dev_fd_supported(hdev))
4978 		return 0;
4979 
4980 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4981 	if (ret)
4982 		return ret;
4983 
4984 	switch (hdev->fd_cfg.fd_mode) {
4985 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4986 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4987 		break;
4988 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4989 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4990 		break;
4991 	default:
4992 		dev_err(&hdev->pdev->dev,
4993 			"Unsupported flow director mode %u\n",
4994 			hdev->fd_cfg.fd_mode);
4995 		return -EOPNOTSUPP;
4996 	}
4997 
4998 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4999 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5000 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5001 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5002 	key_cfg->outer_sipv6_word_en = 0;
5003 	key_cfg->outer_dipv6_word_en = 0;
5004 
5005 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5006 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5007 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5008 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5009 
5010 	/* If the max 400-bit key is used, MAC address tuples are also supported */
5011 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5012 		key_cfg->tuple_active |=
5013 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5014 
5015 	/* roce_type is used to filter roce frames
5016 	 * dst_vport is used to specify the rule
5017 	 */
5018 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5019 
5020 	ret = hclge_get_fd_allocation(hdev,
5021 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5022 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5023 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5024 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5025 	if (ret)
5026 		return ret;
5027 
5028 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5029 }
5030 
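/* Program one flow director TCAM entry at @loc using three chained
 * descriptors. @sel_x selects whether the X or the Y part of the key is
 * being written; passing a NULL @key with @is_add == false invalidates
 * the entry.
 */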
5031 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5032 				int loc, u8 *key, bool is_add)
5033 {
5034 	struct hclge_fd_tcam_config_1_cmd *req1;
5035 	struct hclge_fd_tcam_config_2_cmd *req2;
5036 	struct hclge_fd_tcam_config_3_cmd *req3;
5037 	struct hclge_desc desc[3];
5038 	int ret;
5039 
5040 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5041 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5042 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5043 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5044 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5045 
5046 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5047 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5048 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5049 
5050 	req1->stage = stage;
5051 	req1->xy_sel = sel_x ? 1 : 0;
5052 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5053 	req1->index = cpu_to_le32(loc);
5054 	req1->entry_vld = sel_x ? is_add : 0;
5055 
5056 	if (key) {
5057 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5058 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5059 		       sizeof(req2->tcam_data));
5060 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5061 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5062 	}
5063 
5064 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5065 	if (ret)
5066 		dev_err(&hdev->pdev->dev,
5067 			"config tcam key fail, ret=%d\n",
5068 			ret);
5069 
5070 	return ret;
5071 }
5072 
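/* Build the 64-bit action data for the rule at @loc: the rule-id fields are
 * set first and shifted into the upper 32 bits, while the drop/queue/counter/
 * next-stage fields occupy the lower 32 bits.
 */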
5073 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5074 			      struct hclge_fd_ad_data *action)
5075 {
5076 	struct hclge_fd_ad_config_cmd *req;
5077 	struct hclge_desc desc;
5078 	u64 ad_data = 0;
5079 	int ret;
5080 
5081 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5082 
5083 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5084 	req->index = cpu_to_le32(loc);
5085 	req->stage = stage;
5086 
5087 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5088 		      action->write_rule_id_to_bd);
5089 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5090 			action->rule_id);
5091 	ad_data <<= 32;
5092 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5093 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5094 		      action->forward_to_direct_queue);
5095 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5096 			action->queue_id);
5097 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5098 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5099 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5100 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5101 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5102 			action->counter_id);
5103 
5104 	req->ad_data = cpu_to_le64(ad_data);
5105 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5106 	if (ret)
5107 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5108 
5109 	return ret;
5110 }
5111 
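/* Convert one tuple of @rule into its TCAM key_x/key_y pair using the
 * calc_x()/calc_y() helpers defined elsewhere in this driver. Returns true
 * when the tuple occupies space in the key (an active but unused tuple is
 * simply left as zero), so the caller knows to advance its key cursors;
 * returns false for an unrecognized tuple bit.
 */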
5112 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5113 				   struct hclge_fd_rule *rule)
5114 {
5115 	u16 tmp_x_s, tmp_y_s;
5116 	u32 tmp_x_l, tmp_y_l;
5117 	int i;
5118 
5119 	if (rule->unused_tuple & tuple_bit)
5120 		return true;
5121 
5122 	switch (tuple_bit) {
5123 	case BIT(INNER_DST_MAC):
5124 		for (i = 0; i < ETH_ALEN; i++) {
5125 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5126 			       rule->tuples_mask.dst_mac[i]);
5127 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5128 			       rule->tuples_mask.dst_mac[i]);
5129 		}
5130 
5131 		return true;
5132 	case BIT(INNER_SRC_MAC):
5133 		for (i = 0; i < ETH_ALEN; i++) {
5134 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5135 			       rule->tuples_mask.src_mac[i]);
5136 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5137 			       rule->tuples_mask.src_mac[i]);
5138 		}
5139 
5140 		return true;
5141 	case BIT(INNER_VLAN_TAG_FST):
5142 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5143 		       rule->tuples_mask.vlan_tag1);
5144 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5145 		       rule->tuples_mask.vlan_tag1);
5146 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5147 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5148 
5149 		return true;
5150 	case BIT(INNER_ETH_TYPE):
5151 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5152 		       rule->tuples_mask.ether_proto);
5153 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5154 		       rule->tuples_mask.ether_proto);
5155 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5156 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5157 
5158 		return true;
5159 	case BIT(INNER_IP_TOS):
5160 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5161 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5162 
5163 		return true;
5164 	case BIT(INNER_IP_PROTO):
5165 		calc_x(*key_x, rule->tuples.ip_proto,
5166 		       rule->tuples_mask.ip_proto);
5167 		calc_y(*key_y, rule->tuples.ip_proto,
5168 		       rule->tuples_mask.ip_proto);
5169 
5170 		return true;
5171 	case BIT(INNER_SRC_IP):
5172 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5173 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5174 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5175 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5176 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5177 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5178 
5179 		return true;
5180 	case BIT(INNER_DST_IP):
5181 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5182 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5183 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5184 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5185 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5186 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5187 
5188 		return true;
5189 	case BIT(INNER_SRC_PORT):
5190 		calc_x(tmp_x_s, rule->tuples.src_port,
5191 		       rule->tuples_mask.src_port);
5192 		calc_y(tmp_y_s, rule->tuples.src_port,
5193 		       rule->tuples_mask.src_port);
5194 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5195 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5196 
5197 		return true;
5198 	case BIT(INNER_DST_PORT):
5199 		calc_x(tmp_x_s, rule->tuples.dst_port,
5200 		       rule->tuples_mask.dst_port);
5201 		calc_y(tmp_y_s, rule->tuples.dst_port,
5202 		       rule->tuples_mask.dst_port);
5203 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5204 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5205 
5206 		return true;
5207 	default:
5208 		return false;
5209 	}
5210 }
5211 
5212 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5213 				 u8 vf_id, u8 network_port_id)
5214 {
5215 	u32 port_number = 0;
5216 
5217 	if (port_type == HOST_PORT) {
5218 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5219 				pf_id);
5220 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5221 				vf_id);
5222 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5223 	} else {
5224 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5225 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5226 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5227 	}
5228 
5229 	return port_number;
5230 }
5231 
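/* Pack the active meta data fields (packet type and destination vport) from
 * bit 0 upwards, then left-align the result so the meta data sits in the most
 * significant bits of the 32-bit word before converting it to the key_x/key_y
 * pair.
 */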
5232 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5233 				       __le32 *key_x, __le32 *key_y,
5234 				       struct hclge_fd_rule *rule)
5235 {
5236 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5237 	u8 cur_pos = 0, tuple_size, shift_bits;
5238 	unsigned int i;
5239 
5240 	for (i = 0; i < MAX_META_DATA; i++) {
5241 		tuple_size = meta_data_key_info[i].key_length;
5242 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5243 
5244 		switch (tuple_bit) {
5245 		case BIT(ROCE_TYPE):
5246 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5247 			cur_pos += tuple_size;
5248 			break;
5249 		case BIT(DST_VPORT):
5250 			port_number = hclge_get_port_number(HOST_PORT, 0,
5251 							    rule->vf_id, 0);
5252 			hnae3_set_field(meta_data,
5253 					GENMASK(cur_pos + tuple_size, cur_pos),
5254 					cur_pos, port_number);
5255 			cur_pos += tuple_size;
5256 			break;
5257 		default:
5258 			break;
5259 		}
5260 	}
5261 
5262 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5263 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5264 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5265 
5266 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5267 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5268 }
5269 
5270 /* A complete key consists of a meta data key and a tuple key.
5271  * The meta data key is stored in the MSB region and the tuple key in the
5272  * LSB region; unused bits are filled with 0.
5273  */
5274 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5275 			    struct hclge_fd_rule *rule)
5276 {
5277 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5278 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5279 	u8 *cur_key_x, *cur_key_y;
5280 	u8 meta_data_region;
5281 	u8 tuple_size;
5282 	int ret;
5283 	u32 i;
5284 
5285 	memset(key_x, 0, sizeof(key_x));
5286 	memset(key_y, 0, sizeof(key_y));
5287 	cur_key_x = key_x;
5288 	cur_key_y = key_y;
5289 
5290 	for (i = 0; i < MAX_TUPLE; i++) {
5291 		bool tuple_valid;
5292 		u32 check_tuple;
5293 
5294 		tuple_size = tuple_key_info[i].key_length / 8;
5295 		check_tuple = key_cfg->tuple_active & BIT(i);
5296 
5297 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5298 						     cur_key_y, rule);
5299 		if (tuple_valid) {
5300 			cur_key_x += tuple_size;
5301 			cur_key_y += tuple_size;
5302 		}
5303 	}
5304 
5305 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5306 			MAX_META_DATA_LENGTH / 8;
5307 
5308 	hclge_fd_convert_meta_data(key_cfg,
5309 				   (__le32 *)(key_x + meta_data_region),
5310 				   (__le32 *)(key_y + meta_data_region),
5311 				   rule);
5312 
5313 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5314 				   true);
5315 	if (ret) {
5316 		dev_err(&hdev->pdev->dev,
5317 			"fd key_y config fail, loc=%u, ret=%d\n",
5318 			rule->location, ret);
5319 		return ret;
5320 	}
5321 
5322 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5323 				   true);
5324 	if (ret)
5325 		dev_err(&hdev->pdev->dev,
5326 			"fd key_x config fail, loc=%u, ret=%d\n",
5327 			rule->location, ret);
5328 	return ret;
5329 }
5330 
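/* Translate the rule's action into action data: either drop the packet or
 * forward it to the requested queue. The rule location doubles as both the
 * action-table index and the rule id written back to the RX BD.
 */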
5331 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5332 			       struct hclge_fd_rule *rule)
5333 {
5334 	struct hclge_fd_ad_data ad_data;
5335 
5336 	ad_data.ad_id = rule->location;
5337 
5338 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5339 		ad_data.drop_packet = true;
5340 		ad_data.forward_to_direct_queue = false;
5341 		ad_data.queue_id = 0;
5342 	} else {
5343 		ad_data.drop_packet = false;
5344 		ad_data.forward_to_direct_queue = true;
5345 		ad_data.queue_id = rule->queue_id;
5346 	}
5347 
5348 	ad_data.use_counter = false;
5349 	ad_data.counter_id = 0;
5350 
5351 	ad_data.use_next_stage = false;
5352 	ad_data.next_input_key = 0;
5353 
5354 	ad_data.write_rule_id_to_bd = true;
5355 	ad_data.rule_id = rule->location;
5356 
5357 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5358 }
5359 
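/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and record every tuple the rule does not use in *unused_tuple, so those
 * tuples can be masked out of the TCAM key later.
 */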
5360 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5361 				       u32 *unused_tuple)
5362 {
5363 	if (!spec || !unused_tuple)
5364 		return -EINVAL;
5365 
5366 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5367 
5368 	if (!spec->ip4src)
5369 		*unused_tuple |= BIT(INNER_SRC_IP);
5370 
5371 	if (!spec->ip4dst)
5372 		*unused_tuple |= BIT(INNER_DST_IP);
5373 
5374 	if (!spec->psrc)
5375 		*unused_tuple |= BIT(INNER_SRC_PORT);
5376 
5377 	if (!spec->pdst)
5378 		*unused_tuple |= BIT(INNER_DST_PORT);
5379 
5380 	if (!spec->tos)
5381 		*unused_tuple |= BIT(INNER_IP_TOS);
5382 
5383 	return 0;
5384 }
5385 
5386 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5387 				    u32 *unused_tuple)
5388 {
5389 	if (!spec || !unused_tuple)
5390 		return -EINVAL;
5391 
5392 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5393 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5394 
5395 	if (!spec->ip4src)
5396 		*unused_tuple |= BIT(INNER_SRC_IP);
5397 
5398 	if (!spec->ip4dst)
5399 		*unused_tuple |= BIT(INNER_DST_IP);
5400 
5401 	if (!spec->tos)
5402 		*unused_tuple |= BIT(INNER_IP_TOS);
5403 
5404 	if (!spec->proto)
5405 		*unused_tuple |= BIT(INNER_IP_PROTO);
5406 
5407 	if (spec->l4_4_bytes)
5408 		return -EOPNOTSUPP;
5409 
5410 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5411 		return -EOPNOTSUPP;
5412 
5413 	return 0;
5414 }
5415 
5416 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5417 				       u32 *unused_tuple)
5418 {
5419 	if (!spec || !unused_tuple)
5420 		return -EINVAL;
5421 
5422 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5423 		BIT(INNER_IP_TOS);
5424 
5425 	/* check whether the src/dst ip address is used */
5426 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5427 	    !spec->ip6src[2] && !spec->ip6src[3])
5428 		*unused_tuple |= BIT(INNER_SRC_IP);
5429 
5430 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5431 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5432 		*unused_tuple |= BIT(INNER_DST_IP);
5433 
5434 	if (!spec->psrc)
5435 		*unused_tuple |= BIT(INNER_SRC_PORT);
5436 
5437 	if (!spec->pdst)
5438 		*unused_tuple |= BIT(INNER_DST_PORT);
5439 
5440 	if (spec->tclass)
5441 		return -EOPNOTSUPP;
5442 
5443 	return 0;
5444 }
5445 
5446 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5447 				    u32 *unused_tuple)
5448 {
5449 	if (!spec || !unused_tuple)
5450 		return -EINVAL;
5451 
5452 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5453 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5454 
5455 	/* check whether the src/dst ip address is used */
5456 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5457 	    !spec->ip6src[2] && !spec->ip6src[3])
5458 		*unused_tuple |= BIT(INNER_SRC_IP);
5459 
5460 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5461 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5462 		*unused_tuple |= BIT(INNER_DST_IP);
5463 
5464 	if (!spec->l4_proto)
5465 		*unused_tuple |= BIT(INNER_IP_PROTO);
5466 
5467 	if (spec->tclass)
5468 		return -EOPNOTSUPP;
5469 
5470 	if (spec->l4_4_bytes)
5471 		return -EOPNOTSUPP;
5472 
5473 	return 0;
5474 }
5475 
5476 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5477 {
5478 	if (!spec || !unused_tuple)
5479 		return -EINVAL;
5480 
5481 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5482 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5483 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5484 
5485 	if (is_zero_ether_addr(spec->h_source))
5486 		*unused_tuple |= BIT(INNER_SRC_MAC);
5487 
5488 	if (is_zero_ether_addr(spec->h_dest))
5489 		*unused_tuple |= BIT(INNER_DST_MAC);
5490 
5491 	if (!spec->h_proto)
5492 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5493 
5494 	return 0;
5495 }
5496 
5497 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5498 				    struct ethtool_rx_flow_spec *fs,
5499 				    u32 *unused_tuple)
5500 {
5501 	if (fs->flow_type & FLOW_EXT) {
5502 		if (fs->h_ext.vlan_etype) {
5503 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5504 			return -EOPNOTSUPP;
5505 		}
5506 
5507 		if (!fs->h_ext.vlan_tci)
5508 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5509 
5510 		if (fs->m_ext.vlan_tci &&
5511 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5512 			dev_err(&hdev->pdev->dev,
5513 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5514 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5515 			return -EINVAL;
5516 		}
5517 	} else {
5518 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5519 	}
5520 
5521 	if (fs->flow_type & FLOW_MAC_EXT) {
5522 		if (hdev->fd_cfg.fd_mode !=
5523 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5524 			dev_err(&hdev->pdev->dev,
5525 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5526 			return -EOPNOTSUPP;
5527 		}
5528 
5529 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5530 			*unused_tuple |= BIT(INNER_DST_MAC);
5531 		else
5532 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5533 	}
5534 
5535 	return 0;
5536 }
5537 
5538 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5539 			       struct ethtool_rx_flow_spec *fs,
5540 			       u32 *unused_tuple)
5541 {
5542 	u32 flow_type;
5543 	int ret;
5544 
5545 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5546 		dev_err(&hdev->pdev->dev,
5547 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5548 			fs->location,
5549 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5550 		return -EINVAL;
5551 	}
5552 
5553 	if ((fs->flow_type & FLOW_EXT) &&
5554 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5555 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5556 		return -EOPNOTSUPP;
5557 	}
5558 
5559 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5560 	switch (flow_type) {
5561 	case SCTP_V4_FLOW:
5562 	case TCP_V4_FLOW:
5563 	case UDP_V4_FLOW:
5564 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5565 						  unused_tuple);
5566 		break;
5567 	case IP_USER_FLOW:
5568 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5569 					       unused_tuple);
5570 		break;
5571 	case SCTP_V6_FLOW:
5572 	case TCP_V6_FLOW:
5573 	case UDP_V6_FLOW:
5574 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5575 						  unused_tuple);
5576 		break;
5577 	case IPV6_USER_FLOW:
5578 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5579 					       unused_tuple);
5580 		break;
5581 	case ETHER_FLOW:
5582 		if (hdev->fd_cfg.fd_mode !=
5583 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5584 			dev_err(&hdev->pdev->dev,
5585 				"ETHER_FLOW is not supported in current fd mode!\n");
5586 			return -EOPNOTSUPP;
5587 		}
5588 
5589 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5590 						 unused_tuple);
5591 		break;
5592 	default:
5593 		dev_err(&hdev->pdev->dev,
5594 			"unsupported protocol type, protocol type = %#x\n",
5595 			flow_type);
5596 		return -EOPNOTSUPP;
5597 	}
5598 
5599 	if (ret) {
5600 		dev_err(&hdev->pdev->dev,
5601 			"failed to check flow union tuple, ret = %d\n",
5602 			ret);
5603 		return ret;
5604 	}
5605 
5606 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5607 }
5608 
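/* The flow director rules are kept in fd_rule_list sorted by location and
 * mirrored in the fd_bmap bitmap; fd_rule_lock protects both.
 */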
5609 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5610 {
5611 	struct hclge_fd_rule *rule = NULL;
5612 	struct hlist_node *node2;
5613 
5614 	spin_lock_bh(&hdev->fd_rule_lock);
5615 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5616 		if (rule->location >= location)
5617 			break;
5618 	}
5619 
5620 	spin_unlock_bh(&hdev->fd_rule_lock);
5621 
5622 	return rule && rule->location == location;
5623 }
5624 
5625 /* make sure this is called with fd_rule_lock held */
5626 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5627 				     struct hclge_fd_rule *new_rule,
5628 				     u16 location,
5629 				     bool is_add)
5630 {
5631 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5632 	struct hlist_node *node2;
5633 
5634 	if (is_add && !new_rule)
5635 		return -EINVAL;
5636 
5637 	hlist_for_each_entry_safe(rule, node2,
5638 				  &hdev->fd_rule_list, rule_node) {
5639 		if (rule->location >= location)
5640 			break;
5641 		parent = rule;
5642 	}
5643 
5644 	if (rule && rule->location == location) {
5645 		hlist_del(&rule->rule_node);
5646 		kfree(rule);
5647 		hdev->hclge_fd_rule_num--;
5648 
5649 		if (!is_add) {
5650 			if (!hdev->hclge_fd_rule_num)
5651 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5652 			clear_bit(location, hdev->fd_bmap);
5653 
5654 			return 0;
5655 		}
5656 	} else if (!is_add) {
5657 		dev_err(&hdev->pdev->dev,
5658 			"delete fail, rule %u is nonexistent\n",
5659 			location);
5660 		return -EINVAL;
5661 	}
5662 
5663 	INIT_HLIST_NODE(&new_rule->rule_node);
5664 
5665 	if (parent)
5666 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5667 	else
5668 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5669 
5670 	set_bit(location, hdev->fd_bmap);
5671 	hdev->hclge_fd_rule_num++;
5672 	hdev->fd_active_type = new_rule->rule_type;
5673 
5674 	return 0;
5675 }
5676 
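/* Fill the rule's internal tuple and tuple-mask representation from the
 * ethtool flow spec, converting multi-byte fields from big endian to CPU
 * order.
 */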
5677 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5678 			      struct ethtool_rx_flow_spec *fs,
5679 			      struct hclge_fd_rule *rule)
5680 {
5681 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5682 
5683 	switch (flow_type) {
5684 	case SCTP_V4_FLOW:
5685 	case TCP_V4_FLOW:
5686 	case UDP_V4_FLOW:
5687 		rule->tuples.src_ip[IPV4_INDEX] =
5688 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5689 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5690 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5691 
5692 		rule->tuples.dst_ip[IPV4_INDEX] =
5693 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5694 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5695 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5696 
5697 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5698 		rule->tuples_mask.src_port =
5699 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5700 
5701 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5702 		rule->tuples_mask.dst_port =
5703 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5704 
5705 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5706 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5707 
5708 		rule->tuples.ether_proto = ETH_P_IP;
5709 		rule->tuples_mask.ether_proto = 0xFFFF;
5710 
5711 		break;
5712 	case IP_USER_FLOW:
5713 		rule->tuples.src_ip[IPV4_INDEX] =
5714 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5715 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5716 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5717 
5718 		rule->tuples.dst_ip[IPV4_INDEX] =
5719 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5720 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5721 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5722 
5723 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5724 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5725 
5726 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5727 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5728 
5729 		rule->tuples.ether_proto = ETH_P_IP;
5730 		rule->tuples_mask.ether_proto = 0xFFFF;
5731 
5732 		break;
5733 	case SCTP_V6_FLOW:
5734 	case TCP_V6_FLOW:
5735 	case UDP_V6_FLOW:
5736 		be32_to_cpu_array(rule->tuples.src_ip,
5737 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5738 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5739 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5740 
5741 		be32_to_cpu_array(rule->tuples.dst_ip,
5742 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5743 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5744 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5745 
5746 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5747 		rule->tuples_mask.src_port =
5748 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5749 
5750 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5751 		rule->tuples_mask.dst_port =
5752 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5753 
5754 		rule->tuples.ether_proto = ETH_P_IPV6;
5755 		rule->tuples_mask.ether_proto = 0xFFFF;
5756 
5757 		break;
5758 	case IPV6_USER_FLOW:
5759 		be32_to_cpu_array(rule->tuples.src_ip,
5760 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5761 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5762 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5763 
5764 		be32_to_cpu_array(rule->tuples.dst_ip,
5765 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5766 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5767 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5768 
5769 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5770 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5771 
5772 		rule->tuples.ether_proto = ETH_P_IPV6;
5773 		rule->tuples_mask.ether_proto = 0xFFFF;
5774 
5775 		break;
5776 	case ETHER_FLOW:
5777 		ether_addr_copy(rule->tuples.src_mac,
5778 				fs->h_u.ether_spec.h_source);
5779 		ether_addr_copy(rule->tuples_mask.src_mac,
5780 				fs->m_u.ether_spec.h_source);
5781 
5782 		ether_addr_copy(rule->tuples.dst_mac,
5783 				fs->h_u.ether_spec.h_dest);
5784 		ether_addr_copy(rule->tuples_mask.dst_mac,
5785 				fs->m_u.ether_spec.h_dest);
5786 
5787 		rule->tuples.ether_proto =
5788 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5789 		rule->tuples_mask.ether_proto =
5790 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5791 
5792 		break;
5793 	default:
5794 		return -EOPNOTSUPP;
5795 	}
5796 
5797 	switch (flow_type) {
5798 	case SCTP_V4_FLOW:
5799 	case SCTP_V6_FLOW:
5800 		rule->tuples.ip_proto = IPPROTO_SCTP;
5801 		rule->tuples_mask.ip_proto = 0xFF;
5802 		break;
5803 	case TCP_V4_FLOW:
5804 	case TCP_V6_FLOW:
5805 		rule->tuples.ip_proto = IPPROTO_TCP;
5806 		rule->tuples_mask.ip_proto = 0xFF;
5807 		break;
5808 	case UDP_V4_FLOW:
5809 	case UDP_V6_FLOW:
5810 		rule->tuples.ip_proto = IPPROTO_UDP;
5811 		rule->tuples_mask.ip_proto = 0xFF;
5812 		break;
5813 	default:
5814 		break;
5815 	}
5816 
5817 	if (fs->flow_type & FLOW_EXT) {
5818 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5819 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5820 	}
5821 
5822 	if (fs->flow_type & FLOW_MAC_EXT) {
5823 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5824 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5825 	}
5826 
5827 	return 0;
5828 }
5829 
5830 /* make sure this is called with fd_rule_lock held */
5831 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5832 				struct hclge_fd_rule *rule)
5833 {
5834 	int ret;
5835 
5836 	if (!rule) {
5837 		dev_err(&hdev->pdev->dev,
5838 			"The flow director rule is NULL\n");
5839 		return -EINVAL;
5840 	}
5841 
5842 	/* it will never fail here, so there is no need to check the return value */
5843 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5844 
5845 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5846 	if (ret)
5847 		goto clear_rule;
5848 
5849 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5850 	if (ret)
5851 		goto clear_rule;
5852 
5853 	return 0;
5854 
5855 clear_rule:
5856 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5857 	return ret;
5858 }
5859 
5860 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5861 			      struct ethtool_rxnfc *cmd)
5862 {
5863 	struct hclge_vport *vport = hclge_get_vport(handle);
5864 	struct hclge_dev *hdev = vport->back;
5865 	u16 dst_vport_id = 0, q_index = 0;
5866 	struct ethtool_rx_flow_spec *fs;
5867 	struct hclge_fd_rule *rule;
5868 	u32 unused = 0;
5869 	u8 action;
5870 	int ret;
5871 
5872 	if (!hnae3_dev_fd_supported(hdev)) {
5873 		dev_err(&hdev->pdev->dev,
5874 			"flow director is not supported\n");
5875 		return -EOPNOTSUPP;
5876 	}
5877 
5878 	if (!hdev->fd_en) {
5879 		dev_err(&hdev->pdev->dev,
5880 			"please enable flow director first\n");
5881 		return -EOPNOTSUPP;
5882 	}
5883 
5884 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5885 
5886 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5887 	if (ret)
5888 		return ret;
5889 
5890 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5891 		action = HCLGE_FD_ACTION_DROP_PACKET;
5892 	} else {
5893 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5894 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5895 		u16 tqps;
5896 
5897 		if (vf > hdev->num_req_vfs) {
5898 			dev_err(&hdev->pdev->dev,
5899 				"Error: vf id (%u) > max vf num (%u)\n",
5900 				vf, hdev->num_req_vfs);
5901 			return -EINVAL;
5902 		}
5903 
5904 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5905 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5906 
5907 		if (ring >= tqps) {
5908 			dev_err(&hdev->pdev->dev,
5909 				"Error: queue id (%u) > max tqp num (%u)\n",
5910 				ring, tqps - 1);
5911 			return -EINVAL;
5912 		}
5913 
5914 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5915 		q_index = ring;
5916 	}
5917 
5918 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5919 	if (!rule)
5920 		return -ENOMEM;
5921 
5922 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5923 	if (ret) {
5924 		kfree(rule);
5925 		return ret;
5926 	}
5927 
5928 	rule->flow_type = fs->flow_type;
5929 	rule->location = fs->location;
5930 	rule->unused_tuple = unused;
5931 	rule->vf_id = dst_vport_id;
5932 	rule->queue_id = q_index;
5933 	rule->action = action;
5934 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5935 
5936 	/* to avoid rule conflicts, when the user configures a rule via ethtool,
5937 	 * we need to clear all arfs rules
5938 	 */
5939 	spin_lock_bh(&hdev->fd_rule_lock);
5940 	hclge_clear_arfs_rules(handle);
5941 
5942 	ret = hclge_fd_config_rule(hdev, rule);
5943 
5944 	spin_unlock_bh(&hdev->fd_rule_lock);
5945 
5946 	return ret;
5947 }
5948 
5949 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5950 			      struct ethtool_rxnfc *cmd)
5951 {
5952 	struct hclge_vport *vport = hclge_get_vport(handle);
5953 	struct hclge_dev *hdev = vport->back;
5954 	struct ethtool_rx_flow_spec *fs;
5955 	int ret;
5956 
5957 	if (!hnae3_dev_fd_supported(hdev))
5958 		return -EOPNOTSUPP;
5959 
5960 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5961 
5962 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5963 		return -EINVAL;
5964 
5965 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5966 		dev_err(&hdev->pdev->dev,
5967 			"Delete fail, rule %u is nonexistent\n", fs->location);
5968 		return -ENOENT;
5969 	}
5970 
5971 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5972 				   NULL, false);
5973 	if (ret)
5974 		return ret;
5975 
5976 	spin_lock_bh(&hdev->fd_rule_lock);
5977 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5978 
5979 	spin_unlock_bh(&hdev->fd_rule_lock);
5980 
5981 	return ret;
5982 }
5983 
5984 /* make sure this is called with fd_rule_lock held */
5985 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5986 				     bool clear_list)
5987 {
5988 	struct hclge_vport *vport = hclge_get_vport(handle);
5989 	struct hclge_dev *hdev = vport->back;
5990 	struct hclge_fd_rule *rule;
5991 	struct hlist_node *node;
5992 	u16 location;
5993 
5994 	if (!hnae3_dev_fd_supported(hdev))
5995 		return;
5996 
5997 	for_each_set_bit(location, hdev->fd_bmap,
5998 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5999 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6000 				     NULL, false);
6001 
6002 	if (clear_list) {
6003 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6004 					  rule_node) {
6005 			hlist_del(&rule->rule_node);
6006 			kfree(rule);
6007 		}
6008 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6009 		hdev->hclge_fd_rule_num = 0;
6010 		bitmap_zero(hdev->fd_bmap,
6011 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6012 	}
6013 }
6014 
6015 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6016 {
6017 	struct hclge_vport *vport = hclge_get_vport(handle);
6018 	struct hclge_dev *hdev = vport->back;
6019 	struct hclge_fd_rule *rule;
6020 	struct hlist_node *node;
6021 	int ret;
6022 
6023 	/* Return ok here, because reset error handling will check this
6024 	 * return value. If error is returned here, the reset process will
6025 	 * fail.
6026 	 */
6027 	if (!hnae3_dev_fd_supported(hdev))
6028 		return 0;
6029 
6030 	/* if fd is disabled, the rules should not be restored during reset */
6031 	if (!hdev->fd_en)
6032 		return 0;
6033 
6034 	spin_lock_bh(&hdev->fd_rule_lock);
6035 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6036 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6037 		if (!ret)
6038 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6039 
6040 		if (ret) {
6041 			dev_warn(&hdev->pdev->dev,
6042 				 "Restore rule %u failed, remove it\n",
6043 				 rule->location);
6044 			clear_bit(rule->location, hdev->fd_bmap);
6045 			hlist_del(&rule->rule_node);
6046 			kfree(rule);
6047 			hdev->hclge_fd_rule_num--;
6048 		}
6049 	}
6050 
6051 	if (hdev->hclge_fd_rule_num)
6052 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6053 
6054 	spin_unlock_bh(&hdev->fd_rule_lock);
6055 
6056 	return 0;
6057 }
6058 
6059 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6060 				 struct ethtool_rxnfc *cmd)
6061 {
6062 	struct hclge_vport *vport = hclge_get_vport(handle);
6063 	struct hclge_dev *hdev = vport->back;
6064 
6065 	if (!hnae3_dev_fd_supported(hdev))
6066 		return -EOPNOTSUPP;
6067 
6068 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6069 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6070 
6071 	return 0;
6072 }
6073 
6074 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6075 				     struct ethtool_tcpip4_spec *spec,
6076 				     struct ethtool_tcpip4_spec *spec_mask)
6077 {
6078 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6079 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6080 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6081 
6082 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6083 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6084 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6085 
6086 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6087 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6088 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6089 
6090 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6091 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6092 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6093 
6094 	spec->tos = rule->tuples.ip_tos;
6095 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6096 			0 : rule->tuples_mask.ip_tos;
6097 }
6098 
6099 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6100 				  struct ethtool_usrip4_spec *spec,
6101 				  struct ethtool_usrip4_spec *spec_mask)
6102 {
6103 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6104 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6105 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6106 
6107 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6108 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6109 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6110 
6111 	spec->tos = rule->tuples.ip_tos;
6112 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6113 			0 : rule->tuples_mask.ip_tos;
6114 
6115 	spec->proto = rule->tuples.ip_proto;
6116 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6117 			0 : rule->tuples_mask.ip_proto;
6118 
6119 	spec->ip_ver = ETH_RX_NFC_IP4;
6120 }
6121 
6122 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6123 				     struct ethtool_tcpip6_spec *spec,
6124 				     struct ethtool_tcpip6_spec *spec_mask)
6125 {
6126 	cpu_to_be32_array(spec->ip6src,
6127 			  rule->tuples.src_ip, IPV6_SIZE);
6128 	cpu_to_be32_array(spec->ip6dst,
6129 			  rule->tuples.dst_ip, IPV6_SIZE);
6130 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6131 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6132 	else
6133 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6134 				  IPV6_SIZE);
6135 
6136 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6137 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6138 	else
6139 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6140 				  IPV6_SIZE);
6141 
6142 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6143 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6144 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6145 
6146 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6147 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6148 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6149 }
6150 
6151 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6152 				  struct ethtool_usrip6_spec *spec,
6153 				  struct ethtool_usrip6_spec *spec_mask)
6154 {
6155 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6156 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6157 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6158 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6159 	else
6160 		cpu_to_be32_array(spec_mask->ip6src,
6161 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6162 
6163 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6164 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6165 	else
6166 		cpu_to_be32_array(spec_mask->ip6dst,
6167 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6168 
6169 	spec->l4_proto = rule->tuples.ip_proto;
6170 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6171 			0 : rule->tuples_mask.ip_proto;
6172 }
6173 
6174 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6175 				    struct ethhdr *spec,
6176 				    struct ethhdr *spec_mask)
6177 {
6178 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6179 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6180 
6181 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6182 		eth_zero_addr(spec_mask->h_source);
6183 	else
6184 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6185 
6186 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6187 		eth_zero_addr(spec_mask->h_dest);
6188 	else
6189 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6190 
6191 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6192 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6193 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6194 }
6195 
6196 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6197 				  struct hclge_fd_rule *rule)
6198 {
6199 	if (fs->flow_type & FLOW_EXT) {
6200 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6201 		fs->m_ext.vlan_tci =
6202 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6203 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6204 	}
6205 
6206 	if (fs->flow_type & FLOW_MAC_EXT) {
6207 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6208 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6209 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6210 		else
6211 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6212 					rule->tuples_mask.dst_mac);
6213 	}
6214 }
6215 
6216 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6217 				  struct ethtool_rxnfc *cmd)
6218 {
6219 	struct hclge_vport *vport = hclge_get_vport(handle);
6220 	struct hclge_fd_rule *rule = NULL;
6221 	struct hclge_dev *hdev = vport->back;
6222 	struct ethtool_rx_flow_spec *fs;
6223 	struct hlist_node *node2;
6224 
6225 	if (!hnae3_dev_fd_supported(hdev))
6226 		return -EOPNOTSUPP;
6227 
6228 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6229 
6230 	spin_lock_bh(&hdev->fd_rule_lock);
6231 
6232 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6233 		if (rule->location >= fs->location)
6234 			break;
6235 	}
6236 
6237 	if (!rule || fs->location != rule->location) {
6238 		spin_unlock_bh(&hdev->fd_rule_lock);
6239 
6240 		return -ENOENT;
6241 	}
6242 
6243 	fs->flow_type = rule->flow_type;
6244 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6245 	case SCTP_V4_FLOW:
6246 	case TCP_V4_FLOW:
6247 	case UDP_V4_FLOW:
6248 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6249 					 &fs->m_u.tcp_ip4_spec);
6250 		break;
6251 	case IP_USER_FLOW:
6252 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6253 				      &fs->m_u.usr_ip4_spec);
6254 		break;
6255 	case SCTP_V6_FLOW:
6256 	case TCP_V6_FLOW:
6257 	case UDP_V6_FLOW:
6258 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6259 					 &fs->m_u.tcp_ip6_spec);
6260 		break;
6261 	case IPV6_USER_FLOW:
6262 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6263 				      &fs->m_u.usr_ip6_spec);
6264 		break;
6265 	/* The flow type of the fd rule has been checked before it was added to
6266 	 * the rule list. As the other flow types have been handled above, the
6267 	 * default case must be ETHER_FLOW
6268 	 */
6269 	default:
6270 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6271 					&fs->m_u.ether_spec);
6272 		break;
6273 	}
6274 
6275 	hclge_fd_get_ext_info(fs, rule);
6276 
6277 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6278 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6279 	} else {
6280 		u64 vf_id;
6281 
6282 		fs->ring_cookie = rule->queue_id;
6283 		vf_id = rule->vf_id;
6284 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6285 		fs->ring_cookie |= vf_id;
6286 	}
6287 
6288 	spin_unlock_bh(&hdev->fd_rule_lock);
6289 
6290 	return 0;
6291 }
6292 
6293 static int hclge_get_all_rules(struct hnae3_handle *handle,
6294 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6295 {
6296 	struct hclge_vport *vport = hclge_get_vport(handle);
6297 	struct hclge_dev *hdev = vport->back;
6298 	struct hclge_fd_rule *rule;
6299 	struct hlist_node *node2;
6300 	int cnt = 0;
6301 
6302 	if (!hnae3_dev_fd_supported(hdev))
6303 		return -EOPNOTSUPP;
6304 
6305 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6306 
6307 	spin_lock_bh(&hdev->fd_rule_lock);
6308 	hlist_for_each_entry_safe(rule, node2,
6309 				  &hdev->fd_rule_list, rule_node) {
6310 		if (cnt == cmd->rule_cnt) {
6311 			spin_unlock_bh(&hdev->fd_rule_lock);
6312 			return -EMSGSIZE;
6313 		}
6314 
6315 		rule_locs[cnt] = rule->location;
6316 		cnt++;
6317 	}
6318 
6319 	spin_unlock_bh(&hdev->fd_rule_lock);
6320 
6321 	cmd->rule_cnt = cnt;
6322 
6323 	return 0;
6324 }
6325 
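/* Extract the rule tuples for an aRFS flow from the dissected flow keys.
 * For IPv4 the address is stored in the last word of the IPv6-sized array.
 */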
6326 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6327 				     struct hclge_fd_rule_tuples *tuples)
6328 {
6329 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6330 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6331 
6332 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6333 	tuples->ip_proto = fkeys->basic.ip_proto;
6334 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6335 
6336 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6337 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6338 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6339 	} else {
6340 		int i;
6341 
6342 		for (i = 0; i < IPV6_SIZE; i++) {
6343 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6344 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6345 		}
6346 	}
6347 }
6348 
6349 /* traverse all rules, check whether an existing rule has the same tuples */
6350 static struct hclge_fd_rule *
6351 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6352 			  const struct hclge_fd_rule_tuples *tuples)
6353 {
6354 	struct hclge_fd_rule *rule = NULL;
6355 	struct hlist_node *node;
6356 
6357 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6358 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6359 			return rule;
6360 	}
6361 
6362 	return NULL;
6363 }
6364 
6365 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6366 				     struct hclge_fd_rule *rule)
6367 {
6368 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6369 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6370 			     BIT(INNER_SRC_PORT);
6371 	rule->action = 0;
6372 	rule->vf_id = 0;
6373 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6374 	if (tuples->ether_proto == ETH_P_IP) {
6375 		if (tuples->ip_proto == IPPROTO_TCP)
6376 			rule->flow_type = TCP_V4_FLOW;
6377 		else
6378 			rule->flow_type = UDP_V4_FLOW;
6379 	} else {
6380 		if (tuples->ip_proto == IPPROTO_TCP)
6381 			rule->flow_type = TCP_V6_FLOW;
6382 		else
6383 			rule->flow_type = UDP_V6_FLOW;
6384 	}
6385 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6386 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6387 }
6388 
6389 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6390 				      u16 flow_id, struct flow_keys *fkeys)
6391 {
6392 	struct hclge_vport *vport = hclge_get_vport(handle);
6393 	struct hclge_fd_rule_tuples new_tuples = {};
6394 	struct hclge_dev *hdev = vport->back;
6395 	struct hclge_fd_rule *rule;
6396 	u16 tmp_queue_id;
6397 	u16 bit_id;
6398 	int ret;
6399 
6400 	if (!hnae3_dev_fd_supported(hdev))
6401 		return -EOPNOTSUPP;
6402 
6403 	/* when there is already an fd rule added by the user,
6404 	 * arfs should not work
6405 	 */
6406 	spin_lock_bh(&hdev->fd_rule_lock);
6407 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6408 		spin_unlock_bh(&hdev->fd_rule_lock);
6409 		return -EOPNOTSUPP;
6410 	}
6411 
6412 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6413 
6414 	/* check whether a flow director filter already exists for this flow:
6415 	 * if not, create a new filter for it;
6416 	 * if a filter exists with a different queue id, modify the filter;
6417 	 * if a filter exists with the same queue id, do nothing
6418 	 */
6419 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6420 	if (!rule) {
6421 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6422 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6423 			spin_unlock_bh(&hdev->fd_rule_lock);
6424 			return -ENOSPC;
6425 		}
6426 
6427 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6428 		if (!rule) {
6429 			spin_unlock_bh(&hdev->fd_rule_lock);
6430 			return -ENOMEM;
6431 		}
6432 
6433 		set_bit(bit_id, hdev->fd_bmap);
6434 		rule->location = bit_id;
6435 		rule->flow_id = flow_id;
6436 		rule->queue_id = queue_id;
6437 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6438 		ret = hclge_fd_config_rule(hdev, rule);
6439 
6440 		spin_unlock_bh(&hdev->fd_rule_lock);
6441 
6442 		if (ret)
6443 			return ret;
6444 
6445 		return rule->location;
6446 	}
6447 
6448 	spin_unlock_bh(&hdev->fd_rule_lock);
6449 
6450 	if (rule->queue_id == queue_id)
6451 		return rule->location;
6452 
6453 	tmp_queue_id = rule->queue_id;
6454 	rule->queue_id = queue_id;
6455 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6456 	if (ret) {
6457 		rule->queue_id = tmp_queue_id;
6458 		return ret;
6459 	}
6460 
6461 	return rule->location;
6462 }
6463 
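/* Periodically expire aRFS rules: entries that rps_may_expire_flow() reports
 * as expirable are moved to a temporary list under fd_rule_lock and then
 * removed from the TCAM after the lock is dropped.
 */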
6464 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6465 {
6466 #ifdef CONFIG_RFS_ACCEL
6467 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6468 	struct hclge_fd_rule *rule;
6469 	struct hlist_node *node;
6470 	HLIST_HEAD(del_list);
6471 
6472 	spin_lock_bh(&hdev->fd_rule_lock);
6473 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6474 		spin_unlock_bh(&hdev->fd_rule_lock);
6475 		return;
6476 	}
6477 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6478 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6479 					rule->flow_id, rule->location)) {
6480 			hlist_del_init(&rule->rule_node);
6481 			hlist_add_head(&rule->rule_node, &del_list);
6482 			hdev->hclge_fd_rule_num--;
6483 			clear_bit(rule->location, hdev->fd_bmap);
6484 		}
6485 	}
6486 	spin_unlock_bh(&hdev->fd_rule_lock);
6487 
6488 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6489 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6490 				     rule->location, NULL, false);
6491 		kfree(rule);
6492 	}
6493 #endif
6494 }
6495 
6496 /* make sure this is called with fd_rule_lock held */
6497 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6498 {
6499 #ifdef CONFIG_RFS_ACCEL
6500 	struct hclge_vport *vport = hclge_get_vport(handle);
6501 	struct hclge_dev *hdev = vport->back;
6502 
6503 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6504 		hclge_del_all_fd_entries(handle, true);
6505 #endif
6506 }
6507 
6508 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6509 {
6510 	struct hclge_vport *vport = hclge_get_vport(handle);
6511 	struct hclge_dev *hdev = vport->back;
6512 
6513 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6514 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6515 }
6516 
6517 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6518 {
6519 	struct hclge_vport *vport = hclge_get_vport(handle);
6520 	struct hclge_dev *hdev = vport->back;
6521 
6522 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6523 }
6524 
6525 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6526 {
6527 	struct hclge_vport *vport = hclge_get_vport(handle);
6528 	struct hclge_dev *hdev = vport->back;
6529 
6530 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6531 }
6532 
6533 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6534 {
6535 	struct hclge_vport *vport = hclge_get_vport(handle);
6536 	struct hclge_dev *hdev = vport->back;
6537 
6538 	return hdev->rst_stats.hw_reset_done_cnt;
6539 }
6540 
6541 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6542 {
6543 	struct hclge_vport *vport = hclge_get_vport(handle);
6544 	struct hclge_dev *hdev = vport->back;
6545 	bool clear;
6546 
6547 	hdev->fd_en = enable;
6548 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6549 
6550 	if (!enable) {
6551 		spin_lock_bh(&hdev->fd_rule_lock);
6552 		hclge_del_all_fd_entries(handle, clear);
6553 		spin_unlock_bh(&hdev->fd_rule_lock);
6554 	} else {
6555 		hclge_restore_fd_entries(handle);
6556 	}
6557 }
6558 
6559 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6560 {
6561 	struct hclge_desc desc;
6562 	struct hclge_config_mac_mode_cmd *req =
6563 		(struct hclge_config_mac_mode_cmd *)desc.data;
6564 	u32 loop_en = 0;
6565 	int ret;
6566 
6567 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6568 
6569 	if (enable) {
6570 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6571 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6572 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6573 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6574 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6575 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6576 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6577 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6578 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6579 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6580 	}
6581 
6582 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6583 
6584 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6585 	if (ret)
6586 		dev_err(&hdev->pdev->dev,
6587 			"mac enable fail, ret =%d.\n", ret);
6588 }
6589 
6590 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6591 				     u8 switch_param, u8 param_mask)
6592 {
6593 	struct hclge_mac_vlan_switch_cmd *req;
6594 	struct hclge_desc desc;
6595 	u32 func_id;
6596 	int ret;
6597 
6598 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6599 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6600 
6601 	/* read current config parameter */
6602 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6603 				   true);
6604 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6605 	req->func_id = cpu_to_le32(func_id);
6606 
6607 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6608 	if (ret) {
6609 		dev_err(&hdev->pdev->dev,
6610 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6611 		return ret;
6612 	}
6613 
6614 	/* modify and write new config parameter */
6615 	hclge_cmd_reuse_desc(&desc, false);
6616 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6617 	req->param_mask = param_mask;
6618 
6619 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6620 	if (ret)
6621 		dev_err(&hdev->pdev->dev,
6622 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6623 	return ret;
6624 }
6625 
6626 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6627 				       int link_ret)
6628 {
6629 #define HCLGE_PHY_LINK_STATUS_NUM  200
6630 
6631 	struct phy_device *phydev = hdev->hw.mac.phydev;
6632 	int i = 0;
6633 	int ret;
6634 
6635 	do {
6636 		ret = phy_read_status(phydev);
6637 		if (ret) {
6638 			dev_err(&hdev->pdev->dev,
6639 				"phy update link status fail, ret = %d\n", ret);
6640 			return;
6641 		}
6642 
6643 		if (phydev->link == link_ret)
6644 			break;
6645 
6646 		msleep(HCLGE_LINK_STATUS_MS);
6647 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6648 }
6649 
6650 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6651 {
6652 #define HCLGE_MAC_LINK_STATUS_NUM  100
6653 
6654 	int link_status;
6655 	int i = 0;
6656 	int ret;
6657 
6658 	do {
6659 		ret = hclge_get_mac_link_status(hdev, &link_status);
6660 		if (ret)
6661 			return ret;
6662 		if (link_status == link_ret)
6663 			return 0;
6664 
6665 		msleep(HCLGE_LINK_STATUS_MS);
6666 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6667 	return -EBUSY;
6668 }
6669 
6670 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6671 					  bool is_phy)
6672 {
6673 	int link_ret;
6674 
6675 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6676 
6677 	if (is_phy)
6678 		hclge_phy_link_status_wait(hdev, link_ret);
6679 
6680 	return hclge_mac_link_status_wait(hdev, link_ret);
6681 }
6682 
6683 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6684 {
6685 	struct hclge_config_mac_mode_cmd *req;
6686 	struct hclge_desc desc;
6687 	u32 loop_en;
6688 	int ret;
6689 
6690 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6691 	/* 1. Read out the current MAC mode config */
6692 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6693 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6694 	if (ret) {
6695 		dev_err(&hdev->pdev->dev,
6696 			"mac loopback get fail, ret =%d.\n", ret);
6697 		return ret;
6698 	}
6699 
6700 	/* 2. Then set up the loopback flag */
6701 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6702 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6703 
6704 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6705 
6706 	/* 3. Configure the MAC work mode with the loopback flag
6707 	 * and its original configuration parameters
6708 	 */
6709 	hclge_cmd_reuse_desc(&desc, false);
6710 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6711 	if (ret)
6712 		dev_err(&hdev->pdev->dev,
6713 			"mac loopback set fail, ret =%d.\n", ret);
6714 	return ret;
6715 }
6716 
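/* Enable or disable the serial or parallel SerDes inner loopback through the
 * command queue, then poll the result field until the firmware reports the
 * configuration as done (or the retry limit is hit).
 */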
6717 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6718 				     enum hnae3_loop loop_mode)
6719 {
6720 #define HCLGE_SERDES_RETRY_MS	10
6721 #define HCLGE_SERDES_RETRY_NUM	100
6722 
6723 	struct hclge_serdes_lb_cmd *req;
6724 	struct hclge_desc desc;
6725 	int ret, i = 0;
6726 	u8 loop_mode_b;
6727 
6728 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6729 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6730 
6731 	switch (loop_mode) {
6732 	case HNAE3_LOOP_SERIAL_SERDES:
6733 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6734 		break;
6735 	case HNAE3_LOOP_PARALLEL_SERDES:
6736 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6737 		break;
6738 	default:
6739 		dev_err(&hdev->pdev->dev,
6740 			"unsupported serdes loopback mode %d\n", loop_mode);
6741 		return -ENOTSUPP;
6742 	}
6743 
6744 	if (en) {
6745 		req->enable = loop_mode_b;
6746 		req->mask = loop_mode_b;
6747 	} else {
6748 		req->mask = loop_mode_b;
6749 	}
6750 
6751 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6752 	if (ret) {
6753 		dev_err(&hdev->pdev->dev,
6754 			"serdes loopback set fail, ret = %d\n", ret);
6755 		return ret;
6756 	}
6757 
6758 	do {
6759 		msleep(HCLGE_SERDES_RETRY_MS);
6760 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6761 					   true);
6762 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6763 		if (ret) {
6764 			dev_err(&hdev->pdev->dev,
6765 				"serdes loopback get, ret = %d\n", ret);
6766 			return ret;
6767 		}
6768 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6769 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6770 
6771 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6772 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6773 		return -EBUSY;
6774 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6775 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6776 		return -EIO;
6777 	}
6778 	return ret;
6779 }
6780 
6781 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6782 				     enum hnae3_loop loop_mode)
6783 {
6784 	int ret;
6785 
6786 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6787 	if (ret)
6788 		return ret;
6789 
6790 	hclge_cfg_mac_mode(hdev, en);
6791 
6792 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6793 	if (ret)
6794 		dev_err(&hdev->pdev->dev,
6795 			"serdes loopback config mac mode timeout\n");
6796 
6797 	return ret;
6798 }
6799 
6800 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6801 				     struct phy_device *phydev)
6802 {
6803 	int ret;
6804 
6805 	if (!phydev->suspended) {
6806 		ret = phy_suspend(phydev);
6807 		if (ret)
6808 			return ret;
6809 	}
6810 
6811 	ret = phy_resume(phydev);
6812 	if (ret)
6813 		return ret;
6814 
6815 	return phy_loopback(phydev, true);
6816 }
6817 
6818 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6819 				      struct phy_device *phydev)
6820 {
6821 	int ret;
6822 
6823 	ret = phy_loopback(phydev, false);
6824 	if (ret)
6825 		return ret;
6826 
6827 	return phy_suspend(phydev);
6828 }
6829 
6830 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6831 {
6832 	struct phy_device *phydev = hdev->hw.mac.phydev;
6833 	int ret;
6834 
6835 	if (!phydev)
6836 		return -ENOTSUPP;
6837 
6838 	if (en)
6839 		ret = hclge_enable_phy_loopback(hdev, phydev);
6840 	else
6841 		ret = hclge_disable_phy_loopback(hdev, phydev);
6842 	if (ret) {
6843 		dev_err(&hdev->pdev->dev,
6844 			"set phy loopback fail, ret = %d\n", ret);
6845 		return ret;
6846 	}
6847 
6848 	hclge_cfg_mac_mode(hdev, en);
6849 
6850 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6851 	if (ret)
6852 		dev_err(&hdev->pdev->dev,
6853 			"phy loopback config mac mode timeout\n");
6854 
6855 	return ret;
6856 }
6857 
6858 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6859 			    int stream_id, bool enable)
6860 {
6861 	struct hclge_desc desc;
6862 	struct hclge_cfg_com_tqp_queue_cmd *req =
6863 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6864 	int ret;
6865 
6866 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6867 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6868 	req->stream_id = cpu_to_le16(stream_id);
6869 	if (enable)
6870 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6871 
6872 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6873 	if (ret)
6874 		dev_err(&hdev->pdev->dev,
6875 			"Tqp enable fail, status =%d.\n", ret);
6876 	return ret;
6877 }
6878 
6879 static int hclge_set_loopback(struct hnae3_handle *handle,
6880 			      enum hnae3_loop loop_mode, bool en)
6881 {
6882 	struct hclge_vport *vport = hclge_get_vport(handle);
6883 	struct hnae3_knic_private_info *kinfo;
6884 	struct hclge_dev *hdev = vport->back;
6885 	int i, ret;
6886 
6887 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6888 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6889 	 * the same, the packets are looped back in the SSU. If SSU loopback
6890 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6891 	 */
6892 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6893 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6894 
6895 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6896 						HCLGE_SWITCH_ALW_LPBK_MASK);
6897 		if (ret)
6898 			return ret;
6899 	}
6900 
6901 	switch (loop_mode) {
6902 	case HNAE3_LOOP_APP:
6903 		ret = hclge_set_app_loopback(hdev, en);
6904 		break;
6905 	case HNAE3_LOOP_SERIAL_SERDES:
6906 	case HNAE3_LOOP_PARALLEL_SERDES:
6907 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6908 		break;
6909 	case HNAE3_LOOP_PHY:
6910 		ret = hclge_set_phy_loopback(hdev, en);
6911 		break;
6912 	default:
6913 		ret = -ENOTSUPP;
6914 		dev_err(&hdev->pdev->dev,
6915 			"loop_mode %d is not supported\n", loop_mode);
6916 		break;
6917 	}
6918 
6919 	if (ret)
6920 		return ret;
6921 
6922 	kinfo = &vport->nic.kinfo;
6923 	for (i = 0; i < kinfo->num_tqps; i++) {
6924 		ret = hclge_tqp_enable(hdev, i, 0, en);
6925 		if (ret)
6926 			return ret;
6927 	}
6928 
6929 	return 0;
6930 }
6931 
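/* Disable app loopback and both (serial and parallel) serdes loopback modes
 * so that no loopback mode is left enabled.
 */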
6932 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6933 {
6934 	int ret;
6935 
6936 	ret = hclge_set_app_loopback(hdev, false);
6937 	if (ret)
6938 		return ret;
6939 
6940 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6941 	if (ret)
6942 		return ret;
6943 
6944 	return hclge_cfg_serdes_loopback(hdev, false,
6945 					 HNAE3_LOOP_PARALLEL_SERDES);
6946 }
6947 
6948 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6949 {
6950 	struct hclge_vport *vport = hclge_get_vport(handle);
6951 	struct hnae3_knic_private_info *kinfo;
6952 	struct hnae3_queue *queue;
6953 	struct hclge_tqp *tqp;
6954 	int i;
6955 
6956 	kinfo = &vport->nic.kinfo;
6957 	for (i = 0; i < kinfo->num_tqps; i++) {
6958 		queue = handle->kinfo.tqp[i];
6959 		tqp = container_of(queue, struct hclge_tqp, q);
6960 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6961 	}
6962 }
6963 
6964 static void hclge_flush_link_update(struct hclge_dev *hdev)
6965 {
6966 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6967 
6968 	unsigned long last = hdev->serv_processed_cnt;
6969 	int i = 0;
6970 
6971 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6972 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6973 	       last == hdev->serv_processed_cnt)
6974 		usleep_range(1, 1);
6975 }
6976 
6977 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6978 {
6979 	struct hclge_vport *vport = hclge_get_vport(handle);
6980 	struct hclge_dev *hdev = vport->back;
6981 
6982 	if (enable) {
6983 		hclge_task_schedule(hdev, 0);
6984 	} else {
6985 		/* Set the DOWN flag here to disable link updating */
6986 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6987 
6988 		/* flush memory to make sure DOWN is seen by service task */
6989 		smp_mb__before_atomic();
6990 		hclge_flush_link_update(hdev);
6991 	}
6992 }
6993 
6994 static int hclge_ae_start(struct hnae3_handle *handle)
6995 {
6996 	struct hclge_vport *vport = hclge_get_vport(handle);
6997 	struct hclge_dev *hdev = vport->back;
6998 
6999 	/* mac enable */
7000 	hclge_cfg_mac_mode(hdev, true);
7001 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7002 	hdev->hw.mac.link = 0;
7003 
7004 	/* reset tqp stats */
7005 	hclge_reset_tqp_stats(handle);
7006 
7007 	hclge_mac_start_phy(hdev);
7008 
7009 	return 0;
7010 }
7011 
7012 static void hclge_ae_stop(struct hnae3_handle *handle)
7013 {
7014 	struct hclge_vport *vport = hclge_get_vport(handle);
7015 	struct hclge_dev *hdev = vport->back;
7016 	int i;
7017 
7018 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7019 	spin_lock_bh(&hdev->fd_rule_lock);
7020 	hclge_clear_arfs_rules(handle);
7021 	spin_unlock_bh(&hdev->fd_rule_lock);
7022 
7023 	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
7024 	 * so we only need to stop the PHY here.
7025 	 */
7026 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7027 	    hdev->reset_type != HNAE3_FUNC_RESET &&
7028 	    hdev->reset_type != HNAE3_FLR_RESET) {
7029 		hclge_mac_stop_phy(hdev);
7030 		hclge_update_link_status(hdev);
7031 		return;
7032 	}
7033 
7034 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7035 		hclge_reset_tqp(handle, i);
7036 
7037 	hclge_config_mac_tnl_int(hdev, false);
7038 
7039 	/* Mac disable */
7040 	hclge_cfg_mac_mode(hdev, false);
7041 
7042 	hclge_mac_stop_phy(hdev);
7043 
7044 	/* reset tqp stats */
7045 	hclge_reset_tqp_stats(handle);
7046 	hclge_update_link_status(hdev);
7047 }
7048 
7049 int hclge_vport_start(struct hclge_vport *vport)
7050 {
7051 	struct hclge_dev *hdev = vport->back;
7052 
7053 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7054 	vport->last_active_jiffies = jiffies;
7055 
7056 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7057 		if (vport->vport_id) {
7058 			hclge_restore_mac_table_common(vport);
7059 			hclge_restore_vport_vlan_table(vport);
7060 		} else {
7061 			hclge_restore_hw_table(hdev);
7062 		}
7063 	}
7064 
7065 	clear_bit(vport->vport_id, hdev->vport_config_block);
7066 
7067 	return 0;
7068 }
7069 
7070 void hclge_vport_stop(struct hclge_vport *vport)
7071 {
7072 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7073 }
7074 
7075 static int hclge_client_start(struct hnae3_handle *handle)
7076 {
7077 	struct hclge_vport *vport = hclge_get_vport(handle);
7078 
7079 	return hclge_vport_start(vport);
7080 }
7081 
7082 static void hclge_client_stop(struct hnae3_handle *handle)
7083 {
7084 	struct hclge_vport *vport = hclge_get_vport(handle);
7085 
7086 	hclge_vport_stop(vport);
7087 }
7088 
7089 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7090 					 u16 cmdq_resp, u8  resp_code,
7091 					 enum hclge_mac_vlan_tbl_opcode op)
7092 {
7093 	struct hclge_dev *hdev = vport->back;
7094 
7095 	if (cmdq_resp) {
7096 		dev_err(&hdev->pdev->dev,
7097 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7098 			cmdq_resp);
7099 		return -EIO;
7100 	}
7101 
7102 	if (op == HCLGE_MAC_VLAN_ADD) {
7103 		if (!resp_code || resp_code == 1)
7104 			return 0;
7105 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7106 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7107 			return -ENOSPC;
7108 
7109 		dev_err(&hdev->pdev->dev,
7110 			"add mac addr failed for undefined, code=%u.\n",
7111 			resp_code);
7112 		return -EIO;
7113 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7114 		if (!resp_code) {
7115 			return 0;
7116 		} else if (resp_code == 1) {
7117 			dev_dbg(&hdev->pdev->dev,
7118 				"remove mac addr failed for miss.\n");
7119 			return -ENOENT;
7120 		}
7121 
7122 		dev_err(&hdev->pdev->dev,
7123 			"remove mac addr failed for undefined, code=%u.\n",
7124 			resp_code);
7125 		return -EIO;
7126 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7127 		if (!resp_code) {
7128 			return 0;
7129 		} else if (resp_code == 1) {
7130 			dev_dbg(&hdev->pdev->dev,
7131 				"lookup mac addr failed for miss.\n");
7132 			return -ENOENT;
7133 		}
7134 
7135 		dev_err(&hdev->pdev->dev,
7136 			"lookup mac addr failed for undefined, code=%u.\n",
7137 			resp_code);
7138 		return -EIO;
7139 	}
7140 
7141 	dev_err(&hdev->pdev->dev,
7142 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7143 
7144 	return -EINVAL;
7145 }
7146 
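/* Set or clear the bit for vfid in the VF bitmap carried by the mac/vlan
 * table descriptors: the first 192 function ids are held in desc[1], the
 * remaining ones in desc[2], 32 ids per 32-bit word.
 */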
7147 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7148 {
7149 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7150 
7151 	unsigned int word_num;
7152 	unsigned int bit_num;
7153 
7154 	if (vfid > 255 || vfid < 0)
7155 		return -EIO;
7156 
7157 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7158 		word_num = vfid / 32;
7159 		bit_num  = vfid % 32;
7160 		if (clr)
7161 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7162 		else
7163 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7164 	} else {
7165 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7166 		bit_num  = vfid % 32;
7167 		if (clr)
7168 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7169 		else
7170 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7171 	}
7172 
7173 	return 0;
7174 }
7175 
7176 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7177 {
7178 #define HCLGE_DESC_NUMBER 3
7179 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7180 	int i, j;
7181 
7182 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7183 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7184 			if (desc[i].data[j])
7185 				return false;
7186 
7187 	return true;
7188 }
7189 
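/* Pack a raw MAC address into a mac/vlan table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, with the multicast entry
 * type bits set when is_mc is true.
 */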
7190 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7191 				   const u8 *addr, bool is_mc)
7192 {
7193 	const unsigned char *mac_addr = addr;
7194 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7195 		       (mac_addr[0]) | (mac_addr[1] << 8);
7196 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7197 
7198 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7199 	if (is_mc) {
7200 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7201 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7202 	}
7203 
7204 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7205 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7206 }
7207 
7208 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7209 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7210 {
7211 	struct hclge_dev *hdev = vport->back;
7212 	struct hclge_desc desc;
7213 	u8 resp_code;
7214 	u16 retval;
7215 	int ret;
7216 
7217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7218 
7219 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7220 
7221 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7222 	if (ret) {
7223 		dev_err(&hdev->pdev->dev,
7224 			"del mac addr failed for cmd_send, ret =%d.\n",
7225 			ret);
7226 		return ret;
7227 	}
7228 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7229 	retval = le16_to_cpu(desc.retval);
7230 
7231 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7232 					     HCLGE_MAC_VLAN_REMOVE);
7233 }
7234 
7235 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7236 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7237 				     struct hclge_desc *desc,
7238 				     bool is_mc)
7239 {
7240 	struct hclge_dev *hdev = vport->back;
7241 	u8 resp_code;
7242 	u16 retval;
7243 	int ret;
7244 
7245 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7246 	if (is_mc) {
7247 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7248 		memcpy(desc[0].data,
7249 		       req,
7250 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7251 		hclge_cmd_setup_basic_desc(&desc[1],
7252 					   HCLGE_OPC_MAC_VLAN_ADD,
7253 					   true);
7254 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7255 		hclge_cmd_setup_basic_desc(&desc[2],
7256 					   HCLGE_OPC_MAC_VLAN_ADD,
7257 					   true);
7258 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7259 	} else {
7260 		memcpy(desc[0].data,
7261 		       req,
7262 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7263 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7264 	}
7265 	if (ret) {
7266 		dev_err(&hdev->pdev->dev,
7267 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7268 			ret);
7269 		return ret;
7270 	}
7271 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7272 	retval = le16_to_cpu(desc[0].retval);
7273 
7274 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7275 					     HCLGE_MAC_VLAN_LKUP);
7276 }
7277 
7278 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7279 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7280 				  struct hclge_desc *mc_desc)
7281 {
7282 	struct hclge_dev *hdev = vport->back;
7283 	int cfg_status;
7284 	u8 resp_code;
7285 	u16 retval;
7286 	int ret;
7287 
7288 	if (!mc_desc) {
7289 		struct hclge_desc desc;
7290 
7291 		hclge_cmd_setup_basic_desc(&desc,
7292 					   HCLGE_OPC_MAC_VLAN_ADD,
7293 					   false);
7294 		memcpy(desc.data, req,
7295 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7296 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7297 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7298 		retval = le16_to_cpu(desc.retval);
7299 
7300 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7301 							   resp_code,
7302 							   HCLGE_MAC_VLAN_ADD);
7303 	} else {
7304 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7305 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7306 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7307 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7308 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7309 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7310 		memcpy(mc_desc[0].data, req,
7311 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7312 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7313 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7314 		retval = le16_to_cpu(mc_desc[0].retval);
7315 
7316 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7317 							   resp_code,
7318 							   HCLGE_MAC_VLAN_ADD);
7319 	}
7320 
7321 	if (ret) {
7322 		dev_err(&hdev->pdev->dev,
7323 			"add mac addr failed for cmd_send, ret =%d.\n",
7324 			ret);
7325 		return ret;
7326 	}
7327 
7328 	return cfg_status;
7329 }
7330 
7331 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7332 			       u16 *allocated_size)
7333 {
7334 	struct hclge_umv_spc_alc_cmd *req;
7335 	struct hclge_desc desc;
7336 	int ret;
7337 
7338 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7340 
7341 	req->space_size = cpu_to_le32(space_size);
7342 
7343 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7344 	if (ret) {
7345 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7346 			ret);
7347 		return ret;
7348 	}
7349 
7350 	*allocated_size = le32_to_cpu(desc.data[1]);
7351 
7352 	return 0;
7353 }
7354 
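/* Request the wanted unicast MAC vlan (UMV) table space from firmware and
 * divide the allocated size into (num_alloc_vport + 1) shares: each vport
 * gets one share as its private quota, and the remaining share plus the
 * remainder forms the shared pool used once a private quota is exhausted.
 */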
7355 static int hclge_init_umv_space(struct hclge_dev *hdev)
7356 {
7357 	u16 allocated_size = 0;
7358 	int ret;
7359 
7360 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7361 	if (ret)
7362 		return ret;
7363 
7364 	if (allocated_size < hdev->wanted_umv_size)
7365 		dev_warn(&hdev->pdev->dev,
7366 			 "failed to alloc umv space, want %u, get %u\n",
7367 			 hdev->wanted_umv_size, allocated_size);
7368 
7369 	hdev->max_umv_size = allocated_size;
7370 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7371 	hdev->share_umv_size = hdev->priv_umv_size +
7372 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7373 
7374 	return 0;
7375 }
7376 
7377 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7378 {
7379 	struct hclge_vport *vport;
7380 	int i;
7381 
7382 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7383 		vport = &hdev->vport[i];
7384 		vport->used_umv_num = 0;
7385 	}
7386 
7387 	mutex_lock(&hdev->vport_lock);
7388 	hdev->share_umv_size = hdev->priv_umv_size +
7389 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7390 	mutex_unlock(&hdev->vport_lock);
7391 }
7392 
7393 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7394 {
7395 	struct hclge_dev *hdev = vport->back;
7396 	bool is_full;
7397 
7398 	if (need_lock)
7399 		mutex_lock(&hdev->vport_lock);
7400 
7401 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7402 		   hdev->share_umv_size == 0);
7403 
7404 	if (need_lock)
7405 		mutex_unlock(&hdev->vport_lock);
7406 
7407 	return is_full;
7408 }
7409 
7410 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7411 {
7412 	struct hclge_dev *hdev = vport->back;
7413 
7414 	if (is_free) {
7415 		if (vport->used_umv_num > hdev->priv_umv_size)
7416 			hdev->share_umv_size++;
7417 
7418 		if (vport->used_umv_num > 0)
7419 			vport->used_umv_num--;
7420 	} else {
7421 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7422 		    hdev->share_umv_size > 0)
7423 			hdev->share_umv_size--;
7424 		vport->used_umv_num++;
7425 	}
7426 }
7427 
7428 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7429 						  const u8 *mac_addr)
7430 {
7431 	struct hclge_mac_node *mac_node, *tmp;
7432 
7433 	list_for_each_entry_safe(mac_node, tmp, list, node)
7434 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7435 			return mac_node;
7436 
7437 	return NULL;
7438 }
7439 
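/* Merge a newly requested state into an existing mac node:
 *   node TO_DEL  + request TO_ADD  -> ACTIVE (the re-add cancels the delete)
 *   node TO_ADD  + request TO_DEL  -> node freed (it never reached hardware)
 *   node other   + request TO_DEL  -> TO_DEL
 *   node TO_ADD  + request ACTIVE  -> ACTIVE
 */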
7440 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7441 				  enum HCLGE_MAC_NODE_STATE state)
7442 {
7443 	switch (state) {
7444 	/* from set_rx_mode or tmp_add_list */
7445 	case HCLGE_MAC_TO_ADD:
7446 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7447 			mac_node->state = HCLGE_MAC_ACTIVE;
7448 		break;
7449 	/* only from set_rx_mode */
7450 	case HCLGE_MAC_TO_DEL:
7451 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7452 			list_del(&mac_node->node);
7453 			kfree(mac_node);
7454 		} else {
7455 			mac_node->state = HCLGE_MAC_TO_DEL;
7456 		}
7457 		break;
7458 	/* only from tmp_add_list, the mac_node->state won't be
7459 	 * ACTIVE.
7460 	 */
7461 	case HCLGE_MAC_ACTIVE:
7462 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7463 			mac_node->state = HCLGE_MAC_ACTIVE;
7464 
7465 		break;
7466 	}
7467 }
7468 
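/* Record an add or delete request for addr on the vport's unicast or
 * multicast mac list. The hardware table is not touched here; the periodic
 * service task (see hclge_sync_mac_table) applies the pending changes later.
 */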
7469 int hclge_update_mac_list(struct hclge_vport *vport,
7470 			  enum HCLGE_MAC_NODE_STATE state,
7471 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7472 			  const unsigned char *addr)
7473 {
7474 	struct hclge_dev *hdev = vport->back;
7475 	struct hclge_mac_node *mac_node;
7476 	struct list_head *list;
7477 
7478 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479 		&vport->uc_mac_list : &vport->mc_mac_list;
7480 
7481 	spin_lock_bh(&vport->mac_list_lock);
7482 
7483 	/* if the mac addr is already in the mac list, there is no need to add
7484 	 * a new node; just check the mac addr state and convert it to the
7485 	 * new state, remove it, or do nothing.
7486 	 */
7487 	mac_node = hclge_find_mac_node(list, addr);
7488 	if (mac_node) {
7489 		hclge_update_mac_node(mac_node, state);
7490 		spin_unlock_bh(&vport->mac_list_lock);
7491 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7492 		return 0;
7493 	}
7494 
7495 	/* if this address was never added, there is nothing to delete */
7496 	if (state == HCLGE_MAC_TO_DEL) {
7497 		spin_unlock_bh(&vport->mac_list_lock);
7498 		dev_err(&hdev->pdev->dev,
7499 			"failed to delete address %pM from mac list\n",
7500 			addr);
7501 		return -ENOENT;
7502 	}
7503 
7504 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7505 	if (!mac_node) {
7506 		spin_unlock_bh(&vport->mac_list_lock);
7507 		return -ENOMEM;
7508 	}
7509 
7510 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7511 
7512 	mac_node->state = state;
7513 	ether_addr_copy(mac_node->mac_addr, addr);
7514 	list_add_tail(&mac_node->node, list);
7515 
7516 	spin_unlock_bh(&vport->mac_list_lock);
7517 
7518 	return 0;
7519 }
7520 
7521 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7522 			     const unsigned char *addr)
7523 {
7524 	struct hclge_vport *vport = hclge_get_vport(handle);
7525 
7526 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7527 				     addr);
7528 }
7529 
7530 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7531 			     const unsigned char *addr)
7532 {
7533 	struct hclge_dev *hdev = vport->back;
7534 	struct hclge_mac_vlan_tbl_entry_cmd req;
7535 	struct hclge_desc desc;
7536 	u16 egress_port = 0;
7537 	int ret;
7538 
7539 	/* mac addr check */
7540 	if (is_zero_ether_addr(addr) ||
7541 	    is_broadcast_ether_addr(addr) ||
7542 	    is_multicast_ether_addr(addr)) {
7543 		dev_err(&hdev->pdev->dev,
7544 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7545 			 addr, is_zero_ether_addr(addr),
7546 			 is_broadcast_ether_addr(addr),
7547 			 is_multicast_ether_addr(addr));
7548 		return -EINVAL;
7549 	}
7550 
7551 	memset(&req, 0, sizeof(req));
7552 
7553 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7554 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7555 
7556 	req.egress_port = cpu_to_le16(egress_port);
7557 
7558 	hclge_prepare_mac_addr(&req, addr, false);
7559 
7560 	/* Look up the mac address in the mac_vlan table, and add
7561 	 * it if the entry does not exist. Duplicate unicast entries
7562 	 * are not allowed in the mac vlan table.
7563 	 */
7564 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7565 	if (ret == -ENOENT) {
7566 		mutex_lock(&hdev->vport_lock);
7567 		if (!hclge_is_umv_space_full(vport, false)) {
7568 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7569 			if (!ret)
7570 				hclge_update_umv_space(vport, false);
7571 			mutex_unlock(&hdev->vport_lock);
7572 			return ret;
7573 		}
7574 		mutex_unlock(&hdev->vport_lock);
7575 
7576 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7577 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7578 				hdev->priv_umv_size);
7579 
7580 		return -ENOSPC;
7581 	}
7582 
7583 	/* check if we just hit the duplicate */
7584 	if (!ret)
7585 		return -EEXIST;
7586 
7587 	return ret;
7588 }
7589 
7590 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7591 			    const unsigned char *addr)
7592 {
7593 	struct hclge_vport *vport = hclge_get_vport(handle);
7594 
7595 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7596 				     addr);
7597 }
7598 
7599 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7600 			    const unsigned char *addr)
7601 {
7602 	struct hclge_dev *hdev = vport->back;
7603 	struct hclge_mac_vlan_tbl_entry_cmd req;
7604 	int ret;
7605 
7606 	/* mac addr check */
7607 	if (is_zero_ether_addr(addr) ||
7608 	    is_broadcast_ether_addr(addr) ||
7609 	    is_multicast_ether_addr(addr)) {
7610 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7611 			addr);
7612 		return -EINVAL;
7613 	}
7614 
7615 	memset(&req, 0, sizeof(req));
7616 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7617 	hclge_prepare_mac_addr(&req, addr, false);
7618 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7619 	if (!ret || ret == -ENOENT) {
7620 		mutex_lock(&hdev->vport_lock);
7621 		hclge_update_umv_space(vport, true);
7622 		mutex_unlock(&hdev->vport_lock);
7623 		return 0;
7624 	}
7625 
7626 	return ret;
7627 }
7628 
7629 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7630 			     const unsigned char *addr)
7631 {
7632 	struct hclge_vport *vport = hclge_get_vport(handle);
7633 
7634 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7635 				     addr);
7636 }
7637 
7638 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7639 			     const unsigned char *addr)
7640 {
7641 	struct hclge_dev *hdev = vport->back;
7642 	struct hclge_mac_vlan_tbl_entry_cmd req;
7643 	struct hclge_desc desc[3];
7644 	int status;
7645 
7646 	/* mac addr check */
7647 	if (!is_multicast_ether_addr(addr)) {
7648 		dev_err(&hdev->pdev->dev,
7649 			"Add mc mac err! invalid mac:%pM.\n",
7650 			 addr);
7651 		return -EINVAL;
7652 	}
7653 	memset(&req, 0, sizeof(req));
7654 	hclge_prepare_mac_addr(&req, addr, true);
7655 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7656 	if (status) {
7657 		/* This mac addr does not exist, add a new entry for it */
7658 		memset(desc[0].data, 0, sizeof(desc[0].data));
7659 		memset(desc[1].data, 0, sizeof(desc[0].data));
7660 		memset(desc[2].data, 0, sizeof(desc[0].data));
7661 	}
7662 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7663 	if (status)
7664 		return status;
7665 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7666 
7667 	/* if the table has already overflowed, do not print the message each time */
7668 	if (status == -ENOSPC &&
7669 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7670 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7671 
7672 	return status;
7673 }
7674 
7675 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7676 			    const unsigned char *addr)
7677 {
7678 	struct hclge_vport *vport = hclge_get_vport(handle);
7679 
7680 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7681 				     addr);
7682 }
7683 
7684 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7685 			    const unsigned char *addr)
7686 {
7687 	struct hclge_dev *hdev = vport->back;
7688 	struct hclge_mac_vlan_tbl_entry_cmd req;
7689 	enum hclge_cmd_status status;
7690 	struct hclge_desc desc[3];
7691 
7692 	/* mac addr check */
7693 	if (!is_multicast_ether_addr(addr)) {
7694 		dev_dbg(&hdev->pdev->dev,
7695 			"Remove mc mac err! invalid mac:%pM.\n",
7696 			 addr);
7697 		return -EINVAL;
7698 	}
7699 
7700 	memset(&req, 0, sizeof(req));
7701 	hclge_prepare_mac_addr(&req, addr, true);
7702 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7703 	if (!status) {
7704 		/* This mac addr exists, remove this handle's VFID from it */
7705 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7706 		if (status)
7707 			return status;
7708 
7709 		if (hclge_is_all_function_id_zero(desc))
7710 			/* All the vfids are zero, so this entry needs to be deleted */
7711 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7712 		else
7713 			/* Not all the vfids are zero, just update the vfids */
7714 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7715 
7716 	} else if (status == -ENOENT) {
7717 		status = 0;
7718 	}
7719 
7720 	return status;
7721 }
7722 
7723 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7724 				      struct list_head *list,
7725 				      int (*sync)(struct hclge_vport *,
7726 						  const unsigned char *))
7727 {
7728 	struct hclge_mac_node *mac_node, *tmp;
7729 	int ret;
7730 
7731 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7732 		ret = sync(vport, mac_node->mac_addr);
7733 		if (!ret) {
7734 			mac_node->state = HCLGE_MAC_ACTIVE;
7735 		} else {
7736 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7737 				&vport->state);
7738 
7739 			/* If one unicast mac address already exists in hardware,
7740 			 * we still need to check whether the other unicast mac
7741 			 * addresses are new ones that can be added.
7742 			 */
7743 			if (ret != -EEXIST)
7744 				break;
7745 		}
7746 	}
7747 }
7748 
7749 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7750 					struct list_head *list,
7751 					int (*unsync)(struct hclge_vport *,
7752 						      const unsigned char *))
7753 {
7754 	struct hclge_mac_node *mac_node, *tmp;
7755 	int ret;
7756 
7757 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7758 		ret = unsync(vport, mac_node->mac_addr);
7759 		if (!ret || ret == -ENOENT) {
7760 			list_del(&mac_node->node);
7761 			kfree(mac_node);
7762 		} else {
7763 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7764 				&vport->state);
7765 			break;
7766 		}
7767 	}
7768 }
7769 
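/* Fold the temporary add list back into the vport mac list after a sync
 * attempt, and report whether every entry made it into hardware (i.e. no
 * node is left in the TO_ADD state).
 */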
7770 static bool hclge_sync_from_add_list(struct list_head *add_list,
7771 				     struct list_head *mac_list)
7772 {
7773 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7774 	bool all_added = true;
7775 
7776 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7777 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7778 			all_added = false;
7779 
7780 		/* if the mac address from tmp_add_list is not in the
7781 		 * uc/mc_mac_list, it means a TO_DEL request was received
7782 		 * while the mac address was being added into the mac
7783 		 * table. If the mac_node state is ACTIVE, change it to TO_DEL
7784 		 * so it will be removed next time; otherwise it must be TO_ADD,
7785 		 * meaning this address has not been added into the mac table,
7786 		 * so just remove the mac node.
7787 		 */
7788 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7789 		if (new_node) {
7790 			hclge_update_mac_node(new_node, mac_node->state);
7791 			list_del(&mac_node->node);
7792 			kfree(mac_node);
7793 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7794 			mac_node->state = HCLGE_MAC_TO_DEL;
7795 			list_del(&mac_node->node);
7796 			list_add_tail(&mac_node->node, mac_list);
7797 		} else {
7798 			list_del(&mac_node->node);
7799 			kfree(mac_node);
7800 		}
7801 	}
7802 
7803 	return all_added;
7804 }
7805 
7806 static void hclge_sync_from_del_list(struct list_head *del_list,
7807 				     struct list_head *mac_list)
7808 {
7809 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7810 
7811 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7812 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7813 		if (new_node) {
7814 			/* If the mac addr exists in the mac list, it means a new
7815 			 * TO_ADD request was received while the mac address was
7816 			 * being configured. The mac node state is TO_ADD, and the
7817 			 * address is still present in the hardware (because the
7818 			 * delete failed), so we just need to change the mac node
7819 			 * state to ACTIVE.
7820 			 */
7821 			new_node->state = HCLGE_MAC_ACTIVE;
7822 			list_del(&mac_node->node);
7823 			kfree(mac_node);
7824 		} else {
7825 			list_del(&mac_node->node);
7826 			list_add_tail(&mac_node->node, mac_list);
7827 		}
7828 	}
7829 }
7830 
7831 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7832 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7833 					bool is_all_added)
7834 {
7835 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7836 		if (is_all_added)
7837 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7838 		else
7839 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7840 	} else {
7841 		if (is_all_added)
7842 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7843 		else
7844 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7845 	}
7846 }
7847 
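/* Sync one vport's mac list with hardware in two phases: under the list
 * lock, move TO_DEL nodes to a temporary delete list and copy TO_ADD nodes
 * to a temporary add list; outside the lock, program the hardware (deletes
 * first, to free table space); finally merge the results back into the
 * vport list and update the overflow/promisc flags.
 */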
7848 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7849 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7850 {
7851 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7852 	struct list_head tmp_add_list, tmp_del_list;
7853 	struct list_head *list;
7854 	bool all_added;
7855 
7856 	INIT_LIST_HEAD(&tmp_add_list);
7857 	INIT_LIST_HEAD(&tmp_del_list);
7858 
7859 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so that
7860 	 * we can add/delete them outside the spin lock
7861 	 */
7862 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7863 		&vport->uc_mac_list : &vport->mc_mac_list;
7864 
7865 	spin_lock_bh(&vport->mac_list_lock);
7866 
7867 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7868 		switch (mac_node->state) {
7869 		case HCLGE_MAC_TO_DEL:
7870 			list_del(&mac_node->node);
7871 			list_add_tail(&mac_node->node, &tmp_del_list);
7872 			break;
7873 		case HCLGE_MAC_TO_ADD:
7874 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7875 			if (!new_node)
7876 				goto stop_traverse;
7877 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7878 			new_node->state = mac_node->state;
7879 			list_add_tail(&new_node->node, &tmp_add_list);
7880 			break;
7881 		default:
7882 			break;
7883 		}
7884 	}
7885 
7886 stop_traverse:
7887 	spin_unlock_bh(&vport->mac_list_lock);
7888 
7889 	/* delete first, in order to get max mac table space for adding */
7890 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7891 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7892 					    hclge_rm_uc_addr_common);
7893 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7894 					  hclge_add_uc_addr_common);
7895 	} else {
7896 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7897 					    hclge_rm_mc_addr_common);
7898 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7899 					  hclge_add_mc_addr_common);
7900 	}
7901 
7902 	/* if adding/deleting some mac addresses failed, move them back to
7903 	 * the mac_list and retry next time.
7904 	 */
7905 	spin_lock_bh(&vport->mac_list_lock);
7906 
7907 	hclge_sync_from_del_list(&tmp_del_list, list);
7908 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7909 
7910 	spin_unlock_bh(&vport->mac_list_lock);
7911 
7912 	hclge_update_overflow_flags(vport, mac_type, all_added);
7913 }
7914 
7915 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7916 {
7917 	struct hclge_dev *hdev = vport->back;
7918 
7919 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7920 		return false;
7921 
7922 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7923 		return true;
7924 
7925 	return false;
7926 }
7927 
7928 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7929 {
7930 	int i;
7931 
7932 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7933 		struct hclge_vport *vport = &hdev->vport[i];
7934 
7935 		if (!hclge_need_sync_mac_table(vport))
7936 			continue;
7937 
7938 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7939 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7940 	}
7941 }
7942 
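/* Remove all unicast or multicast addresses of a vport from hardware. When
 * is_del_list is false (e.g. for a VF reset), entries that were ACTIVE are
 * kept in the software list in TO_ADD state so they can be restored later;
 * otherwise they are dropped from the list as well.
 */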
7943 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7944 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7945 {
7946 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7947 	struct hclge_mac_node *mac_cfg, *tmp;
7948 	struct hclge_dev *hdev = vport->back;
7949 	struct list_head tmp_del_list, *list;
7950 	int ret;
7951 
7952 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7953 		list = &vport->uc_mac_list;
7954 		unsync = hclge_rm_uc_addr_common;
7955 	} else {
7956 		list = &vport->mc_mac_list;
7957 		unsync = hclge_rm_mc_addr_common;
7958 	}
7959 
7960 	INIT_LIST_HEAD(&tmp_del_list);
7961 
7962 	if (!is_del_list)
7963 		set_bit(vport->vport_id, hdev->vport_config_block);
7964 
7965 	spin_lock_bh(&vport->mac_list_lock);
7966 
7967 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7968 		switch (mac_cfg->state) {
7969 		case HCLGE_MAC_TO_DEL:
7970 		case HCLGE_MAC_ACTIVE:
7971 			list_del(&mac_cfg->node);
7972 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7973 			break;
7974 		case HCLGE_MAC_TO_ADD:
7975 			if (is_del_list) {
7976 				list_del(&mac_cfg->node);
7977 				kfree(mac_cfg);
7978 			}
7979 			break;
7980 		}
7981 	}
7982 
7983 	spin_unlock_bh(&vport->mac_list_lock);
7984 
7985 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7986 		ret = unsync(vport, mac_cfg->mac_addr);
7987 		if (!ret || ret == -ENOENT) {
7988 			/* clear all mac addrs from hardware, but keep them in
7989 			 * the mac list so that they can be restored after the
7990 			 * vf reset has finished.
7991 			 */
7992 			if (!is_del_list &&
7993 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
7994 				mac_cfg->state = HCLGE_MAC_TO_ADD;
7995 			} else {
7996 				list_del(&mac_cfg->node);
7997 				kfree(mac_cfg);
7998 			}
7999 		} else if (is_del_list) {
8000 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8001 		}
8002 	}
8003 
8004 	spin_lock_bh(&vport->mac_list_lock);
8005 
8006 	hclge_sync_from_del_list(&tmp_del_list, list);
8007 
8008 	spin_unlock_bh(&vport->mac_list_lock);
8009 }
8010 
8011 /* remove all mac addresses when uninitializing */
8012 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8013 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8014 {
8015 	struct hclge_mac_node *mac_node, *tmp;
8016 	struct hclge_dev *hdev = vport->back;
8017 	struct list_head tmp_del_list, *list;
8018 
8019 	INIT_LIST_HEAD(&tmp_del_list);
8020 
8021 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8022 		&vport->uc_mac_list : &vport->mc_mac_list;
8023 
8024 	spin_lock_bh(&vport->mac_list_lock);
8025 
8026 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8027 		switch (mac_node->state) {
8028 		case HCLGE_MAC_TO_DEL:
8029 		case HCLGE_MAC_ACTIVE:
8030 			list_del(&mac_node->node);
8031 			list_add_tail(&mac_node->node, &tmp_del_list);
8032 			break;
8033 		case HCLGE_MAC_TO_ADD:
8034 			list_del(&mac_node->node);
8035 			kfree(mac_node);
8036 			break;
8037 		}
8038 	}
8039 
8040 	spin_unlock_bh(&vport->mac_list_lock);
8041 
8042 	if (mac_type == HCLGE_MAC_ADDR_UC)
8043 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8044 					    hclge_rm_uc_addr_common);
8045 	else
8046 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8047 					    hclge_rm_mc_addr_common);
8048 
8049 	if (!list_empty(&tmp_del_list))
8050 		dev_warn(&hdev->pdev->dev,
8051 			 "failed to completely uninit %s mac list for vport %u.\n",
8052 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8053 			 vport->vport_id);
8054 
8055 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8056 		list_del(&mac_node->node);
8057 		kfree(mac_node);
8058 	}
8059 }
8060 
8061 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8062 {
8063 	struct hclge_vport *vport;
8064 	int i;
8065 
8066 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8067 		vport = &hdev->vport[i];
8068 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8069 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8070 	}
8071 }
8072 
8073 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8074 					      u16 cmdq_resp, u8 resp_code)
8075 {
8076 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8077 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8078 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8079 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8080 
8081 	int return_status;
8082 
8083 	if (cmdq_resp) {
8084 		dev_err(&hdev->pdev->dev,
8085 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8086 			cmdq_resp);
8087 		return -EIO;
8088 	}
8089 
8090 	switch (resp_code) {
8091 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8092 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8093 		return_status = 0;
8094 		break;
8095 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8096 		dev_err(&hdev->pdev->dev,
8097 			"add mac ethertype failed for manager table overflow.\n");
8098 		return_status = -EIO;
8099 		break;
8100 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8101 		dev_err(&hdev->pdev->dev,
8102 			"add mac ethertype failed for key conflict.\n");
8103 		return_status = -EIO;
8104 		break;
8105 	default:
8106 		dev_err(&hdev->pdev->dev,
8107 			"add mac ethertype failed for undefined, code=%u.\n",
8108 			resp_code);
8109 		return_status = -EIO;
8110 	}
8111 
8112 	return return_status;
8113 }
8114 
8115 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8116 				     u8 *mac_addr)
8117 {
8118 	struct hclge_mac_vlan_tbl_entry_cmd req;
8119 	struct hclge_dev *hdev = vport->back;
8120 	struct hclge_desc desc;
8121 	u16 egress_port = 0;
8122 	int i;
8123 
8124 	if (is_zero_ether_addr(mac_addr))
8125 		return false;
8126 
8127 	memset(&req, 0, sizeof(req));
8128 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8129 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8130 	req.egress_port = cpu_to_le16(egress_port);
8131 	hclge_prepare_mac_addr(&req, mac_addr, false);
8132 
8133 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8134 		return true;
8135 
8136 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8137 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8138 		if (i != vf_idx &&
8139 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8140 			return true;
8141 
8142 	return false;
8143 }
8144 
8145 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8146 			    u8 *mac_addr)
8147 {
8148 	struct hclge_vport *vport = hclge_get_vport(handle);
8149 	struct hclge_dev *hdev = vport->back;
8150 
8151 	vport = hclge_get_vf_vport(hdev, vf);
8152 	if (!vport)
8153 		return -EINVAL;
8154 
8155 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8156 		dev_info(&hdev->pdev->dev,
8157 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8158 			 mac_addr);
8159 		return 0;
8160 	}
8161 
8162 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8163 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8164 			mac_addr);
8165 		return -EEXIST;
8166 	}
8167 
8168 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8169 
8170 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8171 		dev_info(&hdev->pdev->dev,
8172 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8173 			 vf, mac_addr);
8174 		return hclge_inform_reset_assert_to_vf(vport);
8175 	}
8176 
8177 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8178 		 vf, mac_addr);
8179 	return 0;
8180 }
8181 
8182 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8183 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8184 {
8185 	struct hclge_desc desc;
8186 	u8 resp_code;
8187 	u16 retval;
8188 	int ret;
8189 
8190 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8191 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8192 
8193 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8194 	if (ret) {
8195 		dev_err(&hdev->pdev->dev,
8196 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8197 			ret);
8198 		return ret;
8199 	}
8200 
8201 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8202 	retval = le16_to_cpu(desc.retval);
8203 
8204 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8205 }
8206 
8207 static int init_mgr_tbl(struct hclge_dev *hdev)
8208 {
8209 	int ret;
8210 	int i;
8211 
8212 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8213 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8214 		if (ret) {
8215 			dev_err(&hdev->pdev->dev,
8216 				"add mac ethertype failed, ret =%d.\n",
8217 				ret);
8218 			return ret;
8219 		}
8220 	}
8221 
8222 	return 0;
8223 }
8224 
8225 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8226 {
8227 	struct hclge_vport *vport = hclge_get_vport(handle);
8228 	struct hclge_dev *hdev = vport->back;
8229 
8230 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8231 }
8232 
8233 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8234 				       const u8 *old_addr, const u8 *new_addr)
8235 {
8236 	struct list_head *list = &vport->uc_mac_list;
8237 	struct hclge_mac_node *old_node, *new_node;
8238 
8239 	new_node = hclge_find_mac_node(list, new_addr);
8240 	if (!new_node) {
8241 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8242 		if (!new_node)
8243 			return -ENOMEM;
8244 
8245 		new_node->state = HCLGE_MAC_TO_ADD;
8246 		ether_addr_copy(new_node->mac_addr, new_addr);
8247 		list_add(&new_node->node, list);
8248 	} else {
8249 		if (new_node->state == HCLGE_MAC_TO_DEL)
8250 			new_node->state = HCLGE_MAC_ACTIVE;
8251 
8252 		/* make sure the new addr is at the list head, so that the dev
8253 		 * addr is not skipped because of the umv space limitation when
8254 		 * the mac table is re-added after a global/IMP reset clears
8255 		 * the hardware mac table.
8256 		 */
8257 		list_move(&new_node->node, list);
8258 	}
8259 
8260 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8261 		old_node = hclge_find_mac_node(list, old_addr);
8262 		if (old_node) {
8263 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8264 				list_del(&old_node->node);
8265 				kfree(old_node);
8266 			} else {
8267 				old_node->state = HCLGE_MAC_TO_DEL;
8268 			}
8269 		}
8270 	}
8271 
8272 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8273 
8274 	return 0;
8275 }
8276 
8277 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8278 			      bool is_first)
8279 {
8280 	const unsigned char *new_addr = (const unsigned char *)p;
8281 	struct hclge_vport *vport = hclge_get_vport(handle);
8282 	struct hclge_dev *hdev = vport->back;
8283 	unsigned char *old_addr = NULL;
8284 	int ret;
8285 
8286 	/* mac addr check */
8287 	if (is_zero_ether_addr(new_addr) ||
8288 	    is_broadcast_ether_addr(new_addr) ||
8289 	    is_multicast_ether_addr(new_addr)) {
8290 		dev_err(&hdev->pdev->dev,
8291 			"change uc mac err! invalid mac: %pM.\n",
8292 			 new_addr);
8293 		return -EINVAL;
8294 	}
8295 
8296 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8297 	if (ret) {
8298 		dev_err(&hdev->pdev->dev,
8299 			"failed to configure mac pause address, ret = %d\n",
8300 			ret);
8301 		return ret;
8302 	}
8303 
8304 	if (!is_first)
8305 		old_addr = hdev->hw.mac.mac_addr;
8306 
8307 	spin_lock_bh(&vport->mac_list_lock);
8308 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8309 	if (ret) {
8310 		dev_err(&hdev->pdev->dev,
8311 			"failed to change the mac addr:%pM, ret = %d\n",
8312 			new_addr, ret);
8313 		spin_unlock_bh(&vport->mac_list_lock);
8314 
8315 		if (!is_first)
8316 			hclge_pause_addr_cfg(hdev, old_addr);
8317 
8318 		return ret;
8319 	}
8320 	/* we must update the dev addr under the spin lock to prevent it
8321 	 * from being removed by the set_rx_mode path.
8322 	 */
8323 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8324 	spin_unlock_bh(&vport->mac_list_lock);
8325 
8326 	hclge_task_schedule(hdev, 0);
8327 
8328 	return 0;
8329 }
8330 
8331 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8332 			  int cmd)
8333 {
8334 	struct hclge_vport *vport = hclge_get_vport(handle);
8335 	struct hclge_dev *hdev = vport->back;
8336 
8337 	if (!hdev->hw.mac.phydev)
8338 		return -EOPNOTSUPP;
8339 
8340 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8341 }
8342 
8343 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8344 				      u8 fe_type, bool filter_en, u8 vf_id)
8345 {
8346 	struct hclge_vlan_filter_ctrl_cmd *req;
8347 	struct hclge_desc desc;
8348 	int ret;
8349 
8350 	/* read current vlan filter parameter */
8351 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8352 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8353 	req->vlan_type = vlan_type;
8354 	req->vf_id = vf_id;
8355 
8356 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8357 	if (ret) {
8358 		dev_err(&hdev->pdev->dev,
8359 			"failed to get vlan filter config, ret = %d.\n", ret);
8360 		return ret;
8361 	}
8362 
8363 	/* modify and write new config parameter */
8364 	hclge_cmd_reuse_desc(&desc, false);
8365 	req->vlan_fe = filter_en ?
8366 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8367 
8368 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8369 	if (ret)
8370 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8371 			ret);
8372 
8373 	return ret;
8374 }
8375 
8376 #define HCLGE_FILTER_TYPE_VF		0
8377 #define HCLGE_FILTER_TYPE_PORT		1
8378 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8379 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8380 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8381 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8382 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8383 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8384 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8385 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8386 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8387 
8388 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8389 {
8390 	struct hclge_vport *vport = hclge_get_vport(handle);
8391 	struct hclge_dev *hdev = vport->back;
8392 
8393 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8394 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8395 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8396 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8397 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8398 	} else {
8399 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8400 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8401 					   0);
8402 	}
8403 	if (enable)
8404 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8405 	else
8406 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8407 }
8408 
8409 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8410 				    bool is_kill, u16 vlan,
8411 				    __be16 proto)
8412 {
8413 	struct hclge_vport *vport = &hdev->vport[vfid];
8414 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8415 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8416 	struct hclge_desc desc[2];
8417 	u8 vf_byte_val;
8418 	u8 vf_byte_off;
8419 	int ret;
8420 
8421 	/* If the vf vlan table is full, firmware disables the vf vlan
8422 	 * filter, so adding a new vlan id to it is neither possible nor
8423 	 * necessary. If spoof check is enabled and the table is full,
8424 	 * don't add a new vlan: tx packets with that vlan id would be dropped.
8425 	 */
8426 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8427 		if (vport->vf_info.spoofchk && vlan) {
8428 			dev_err(&hdev->pdev->dev,
8429 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8430 			return -EPERM;
8431 		}
8432 		return 0;
8433 	}
8434 
8435 	hclge_cmd_setup_basic_desc(&desc[0],
8436 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8437 	hclge_cmd_setup_basic_desc(&desc[1],
8438 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8439 
8440 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8441 
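	/* Each VF owns one bit of the vf_bitmap carried by the two descriptors:
	 * byte index = vfid / 8, bit value = 1 << (vfid % 8). For example,
	 * vfid 10 selects byte 1 with value 0x04.
	 */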
8442 	vf_byte_off = vfid / 8;
8443 	vf_byte_val = 1 << (vfid % 8);
8444 
8445 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8446 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8447 
8448 	req0->vlan_id  = cpu_to_le16(vlan);
8449 	req0->vlan_cfg = is_kill;
8450 
8451 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8452 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8453 	else
8454 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8455 
8456 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8457 	if (ret) {
8458 		dev_err(&hdev->pdev->dev,
8459 			"Send vf vlan command fail, ret =%d.\n",
8460 			ret);
8461 		return ret;
8462 	}
8463 
8464 	if (!is_kill) {
8465 #define HCLGE_VF_VLAN_NO_ENTRY	2
8466 		if (!req0->resp_code || req0->resp_code == 1)
8467 			return 0;
8468 
8469 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8470 			set_bit(vfid, hdev->vf_vlan_full);
8471 			dev_warn(&hdev->pdev->dev,
8472 				 "vf vlan table is full, vf vlan filter is disabled\n");
8473 			return 0;
8474 		}
8475 
8476 		dev_err(&hdev->pdev->dev,
8477 			"Add vf vlan filter fail, ret =%u.\n",
8478 			req0->resp_code);
8479 	} else {
8480 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8481 		if (!req0->resp_code)
8482 			return 0;
8483 
8484 		/* The vf vlan filter is disabled when the vf vlan table is
8485 		 * full, so new vlan ids are not added to the table. Just
8486 		 * return 0 without a warning, to avoid massive verbose
8487 		 * logs on unload.
8488 		 */
8489 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8490 			return 0;
8491 
8492 		dev_err(&hdev->pdev->dev,
8493 			"Kill vf vlan filter fail, ret =%u.\n",
8494 			req0->resp_code);
8495 	}
8496 
8497 	return -EIO;
8498 }
8499 
8500 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8501 				      u16 vlan_id, bool is_kill)
8502 {
8503 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8504 	struct hclge_desc desc;
8505 	u8 vlan_offset_byte_val;
8506 	u8 vlan_offset_byte;
8507 	u8 vlan_offset_160;
8508 	int ret;
8509 
8510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8511 
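	/* The PF VLAN table is programmed in chunks: vlan_offset_160 selects
	 * the chunk, and the byte/bit below select the entry within it.
	 * Assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and HCLGE_VLAN_BYTE_SIZE
	 * is 8 (as the names suggest), vlan_id 1000 maps to offset 6,
	 * byte 5, bit 0.
	 */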
8512 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8513 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8514 			   HCLGE_VLAN_BYTE_SIZE;
8515 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8516 
8517 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8518 	req->vlan_offset = vlan_offset_160;
8519 	req->vlan_cfg = is_kill;
8520 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8521 
8522 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8523 	if (ret)
8524 		dev_err(&hdev->pdev->dev,
8525 			"port vlan command, send fail, ret =%d.\n", ret);
8526 	return ret;
8527 }
8528 
8529 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8530 				    u16 vport_id, u16 vlan_id,
8531 				    bool is_kill)
8532 {
8533 	u16 vport_idx, vport_num = 0;
8534 	int ret;
8535 
8536 	if (is_kill && !vlan_id)
8537 		return 0;
8538 
8539 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8540 				       proto);
8541 	if (ret) {
8542 		dev_err(&hdev->pdev->dev,
8543 			"Set %u vport vlan filter config fail, ret =%d.\n",
8544 			vport_id, ret);
8545 		return ret;
8546 	}
8547 
8548 	/* vlan 0 may be added twice when 8021q module is enabled */
8549 	if (!is_kill && !vlan_id &&
8550 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8551 		return 0;
8552 
8553 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8554 		dev_err(&hdev->pdev->dev,
8555 			"Add port vlan failed, vport %u is already in vlan %u\n",
8556 			vport_id, vlan_id);
8557 		return -EINVAL;
8558 	}
8559 
8560 	if (is_kill &&
8561 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8562 		dev_err(&hdev->pdev->dev,
8563 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8564 			vport_id, vlan_id);
8565 		return -EINVAL;
8566 	}
8567 
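	/* Count how many vports still reference this vlan id; the port level
	 * filter is touched only when the first vport adds the vlan or the
	 * last vport removes it.
	 */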
8568 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8569 		vport_num++;
8570 
8571 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8572 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8573 						 is_kill);
8574 
8575 	return ret;
8576 }
8577 
8578 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8579 {
8580 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8581 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8582 	struct hclge_dev *hdev = vport->back;
8583 	struct hclge_desc desc;
8584 	u16 bmap_index;
8585 	int status;
8586 
8587 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8588 
8589 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8590 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8591 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8592 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8593 		      vcfg->accept_tag1 ? 1 : 0);
8594 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8595 		      vcfg->accept_untag1 ? 1 : 0);
8596 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8597 		      vcfg->accept_tag2 ? 1 : 0);
8598 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8599 		      vcfg->accept_untag2 ? 1 : 0);
8600 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8601 		      vcfg->insert_tag1_en ? 1 : 0);
8602 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8603 		      vcfg->insert_tag2_en ? 1 : 0);
8604 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8605 
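	/* Address this vport's bit in the command: vf_offset selects the
	 * command block, bmap_index the byte within it, and the shift below
	 * the bit for vport_id. The Rx config below uses the same scheme.
	 */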
8606 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8607 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8608 			HCLGE_VF_NUM_PER_BYTE;
8609 	req->vf_bitmap[bmap_index] =
8610 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8611 
8612 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8613 	if (status)
8614 		dev_err(&hdev->pdev->dev,
8615 			"Send port txvlan cfg command fail, ret =%d\n",
8616 			status);
8617 
8618 	return status;
8619 }
8620 
8621 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8622 {
8623 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8624 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8625 	struct hclge_dev *hdev = vport->back;
8626 	struct hclge_desc desc;
8627 	u16 bmap_index;
8628 	int status;
8629 
8630 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8631 
8632 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8633 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8634 		      vcfg->strip_tag1_en ? 1 : 0);
8635 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8636 		      vcfg->strip_tag2_en ? 1 : 0);
8637 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8638 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8639 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8640 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8641 
8642 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8643 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8644 			HCLGE_VF_NUM_PER_BYTE;
8645 	req->vf_bitmap[bmap_index] =
8646 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8647 
8648 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8649 	if (status)
8650 		dev_err(&hdev->pdev->dev,
8651 			"Send port rxvlan cfg command fail, ret =%d\n",
8652 			status);
8653 
8654 	return status;
8655 }
8656 
8657 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8658 				  u16 port_base_vlan_state,
8659 				  u16 vlan_tag)
8660 {
8661 	int ret;
8662 
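	/* With port based VLAN disabled, tagged packets are accepted as-is
	 * and no default tag is inserted on Tx; with it enabled, the port
	 * based VLAN tag is inserted as tag1 and packets already carrying
	 * tag1 are not accepted.
	 */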
8663 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8664 		vport->txvlan_cfg.accept_tag1 = true;
8665 		vport->txvlan_cfg.insert_tag1_en = false;
8666 		vport->txvlan_cfg.default_tag1 = 0;
8667 	} else {
8668 		vport->txvlan_cfg.accept_tag1 = false;
8669 		vport->txvlan_cfg.insert_tag1_en = true;
8670 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8671 	}
8672 
8673 	vport->txvlan_cfg.accept_untag1 = true;
8674 
8675 	/* accept_tag2 and accept_untag2 are not supported on
8676 	 * pdev revision 0x20; newer revisions support them, but
8677 	 * these two fields cannot be configured by the user.
8678 	 */
8679 	vport->txvlan_cfg.accept_tag2 = true;
8680 	vport->txvlan_cfg.accept_untag2 = true;
8681 	vport->txvlan_cfg.insert_tag2_en = false;
8682 	vport->txvlan_cfg.default_tag2 = 0;
8683 
8684 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8685 		vport->rxvlan_cfg.strip_tag1_en = false;
8686 		vport->rxvlan_cfg.strip_tag2_en =
8687 				vport->rxvlan_cfg.rx_vlan_offload_en;
8688 	} else {
8689 		vport->rxvlan_cfg.strip_tag1_en =
8690 				vport->rxvlan_cfg.rx_vlan_offload_en;
8691 		vport->rxvlan_cfg.strip_tag2_en = true;
8692 	}
8693 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8694 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8695 
8696 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8697 	if (ret)
8698 		return ret;
8699 
8700 	return hclge_set_vlan_rx_offload_cfg(vport);
8701 }
8702 
8703 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8704 {
8705 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8706 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8707 	struct hclge_desc desc;
8708 	int status;
8709 
8710 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8711 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8712 	rx_req->ot_fst_vlan_type =
8713 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8714 	rx_req->ot_sec_vlan_type =
8715 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8716 	rx_req->in_fst_vlan_type =
8717 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8718 	rx_req->in_sec_vlan_type =
8719 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8720 
8721 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8722 	if (status) {
8723 		dev_err(&hdev->pdev->dev,
8724 			"Send rxvlan protocol type command fail, ret =%d\n",
8725 			status);
8726 		return status;
8727 	}
8728 
8729 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8730 
8731 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8732 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8733 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8734 
8735 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8736 	if (status)
8737 		dev_err(&hdev->pdev->dev,
8738 			"Send txvlan protocol type command fail, ret =%d\n",
8739 			status);
8740 
8741 	return status;
8742 }
8743 
8744 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8745 {
8746 #define HCLGE_DEF_VLAN_TYPE		0x8100
8747 
8748 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8749 	struct hclge_vport *vport;
8750 	int ret;
8751 	int i;
8752 
8753 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8754 		/* for revision 0x21, vf vlan filter is per function */
8755 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8756 			vport = &hdev->vport[i];
8757 			ret = hclge_set_vlan_filter_ctrl(hdev,
8758 							 HCLGE_FILTER_TYPE_VF,
8759 							 HCLGE_FILTER_FE_EGRESS,
8760 							 true,
8761 							 vport->vport_id);
8762 			if (ret)
8763 				return ret;
8764 		}
8765 
8766 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8767 						 HCLGE_FILTER_FE_INGRESS, true,
8768 						 0);
8769 		if (ret)
8770 			return ret;
8771 	} else {
8772 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8773 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8774 						 true, 0);
8775 		if (ret)
8776 			return ret;
8777 	}
8778 
8779 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8780 
8781 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8782 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8783 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8784 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8785 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8786 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8787 
8788 	ret = hclge_set_vlan_protocol_type(hdev);
8789 	if (ret)
8790 		return ret;
8791 
8792 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8793 		u16 vlan_tag;
8794 
8795 		vport = &hdev->vport[i];
8796 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8797 
8798 		ret = hclge_vlan_offload_cfg(vport,
8799 					     vport->port_base_vlan_cfg.state,
8800 					     vlan_tag);
8801 		if (ret)
8802 			return ret;
8803 	}
8804 
8805 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8806 }
8807 
8808 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8809 				       bool writen_to_tbl)
8810 {
8811 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8812 
8813 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8814 		if (vlan->vlan_id == vlan_id)
8815 			return;
8816 
8817 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8818 	if (!vlan)
8819 		return;
8820 
8821 	vlan->hd_tbl_status = writen_to_tbl;
8822 	vlan->vlan_id = vlan_id;
8823 
8824 	list_add_tail(&vlan->node, &vport->vlan_list);
8825 }
8826 
8827 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8828 {
8829 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8830 	struct hclge_dev *hdev = vport->back;
8831 	int ret;
8832 
8833 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8834 		if (!vlan->hd_tbl_status) {
8835 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8836 						       vport->vport_id,
8837 						       vlan->vlan_id, false);
8838 			if (ret) {
8839 				dev_err(&hdev->pdev->dev,
8840 					"restore vport vlan list failed, ret=%d\n",
8841 					ret);
8842 				return ret;
8843 			}
8844 		}
8845 		vlan->hd_tbl_status = true;
8846 	}
8847 
8848 	return 0;
8849 }
8850 
8851 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8852 				      bool is_write_tbl)
8853 {
8854 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8855 	struct hclge_dev *hdev = vport->back;
8856 
8857 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8858 		if (vlan->vlan_id == vlan_id) {
8859 			if (is_write_tbl && vlan->hd_tbl_status)
8860 				hclge_set_vlan_filter_hw(hdev,
8861 							 htons(ETH_P_8021Q),
8862 							 vport->vport_id,
8863 							 vlan_id,
8864 							 true);
8865 
8866 			list_del(&vlan->node);
8867 			kfree(vlan);
8868 			break;
8869 		}
8870 	}
8871 }
8872 
8873 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8874 {
8875 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8876 	struct hclge_dev *hdev = vport->back;
8877 
8878 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8879 		if (vlan->hd_tbl_status)
8880 			hclge_set_vlan_filter_hw(hdev,
8881 						 htons(ETH_P_8021Q),
8882 						 vport->vport_id,
8883 						 vlan->vlan_id,
8884 						 true);
8885 
8886 		vlan->hd_tbl_status = false;
8887 		if (is_del_list) {
8888 			list_del(&vlan->node);
8889 			kfree(vlan);
8890 		}
8891 	}
8892 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8893 }
8894 
8895 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8896 {
8897 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8898 	struct hclge_vport *vport;
8899 	int i;
8900 
8901 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8902 		vport = &hdev->vport[i];
8903 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8904 			list_del(&vlan->node);
8905 			kfree(vlan);
8906 		}
8907 	}
8908 }
8909 
8910 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8911 {
8912 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8913 	struct hclge_dev *hdev = vport->back;
8914 	u16 vlan_proto;
8915 	u16 vlan_id;
8916 	u16 state;
8917 	int ret;
8918 
8919 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8920 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8921 	state = vport->port_base_vlan_cfg.state;
8922 
8923 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8924 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8925 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8926 					 vport->vport_id, vlan_id,
8927 					 false);
8928 		return;
8929 	}
8930 
8931 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8932 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8933 					       vport->vport_id,
8934 					       vlan->vlan_id, false);
8935 		if (ret)
8936 			break;
8937 		vlan->hd_tbl_status = true;
8938 	}
8939 }
8940 
8941 /* For global reset and imp reset, hardware clears the mac table, so
8942  * the mac address state is changed from ACTIVE to TO_ADD; the service
8943  * task then restores these addresses after the reset completes.
8944  * Furthermore, mac addresses in state TO_DEL or DEL_FAIL need not be
8945  * restored after reset, so just remove those mac nodes from mac_list.
8946  */
8947 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8948 {
8949 	struct hclge_mac_node *mac_node, *tmp;
8950 
8951 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8952 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8953 			mac_node->state = HCLGE_MAC_TO_ADD;
8954 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8955 			list_del(&mac_node->node);
8956 			kfree(mac_node);
8957 		}
8958 	}
8959 }
8960 
8961 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8962 {
8963 	spin_lock_bh(&vport->mac_list_lock);
8964 
8965 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8966 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8967 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8968 
8969 	spin_unlock_bh(&vport->mac_list_lock);
8970 }
8971 
8972 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8973 {
8974 	struct hclge_vport *vport = &hdev->vport[0];
8975 	struct hnae3_handle *handle = &vport->nic;
8976 
8977 	hclge_restore_mac_table_common(vport);
8978 	hclge_restore_vport_vlan_table(vport);
8979 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8980 
8981 	hclge_restore_fd_entries(handle);
8982 }
8983 
8984 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8985 {
8986 	struct hclge_vport *vport = hclge_get_vport(handle);
8987 
8988 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8989 		vport->rxvlan_cfg.strip_tag1_en = false;
8990 		vport->rxvlan_cfg.strip_tag2_en = enable;
8991 	} else {
8992 		vport->rxvlan_cfg.strip_tag1_en = enable;
8993 		vport->rxvlan_cfg.strip_tag2_en = true;
8994 	}
8995 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8996 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8997 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8998 
8999 	return hclge_set_vlan_rx_offload_cfg(vport);
9000 }
9001 
9002 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9003 					    u16 port_base_vlan_state,
9004 					    struct hclge_vlan_info *new_info,
9005 					    struct hclge_vlan_info *old_info)
9006 {
9007 	struct hclge_dev *hdev = vport->back;
9008 	int ret;
9009 
9010 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9011 		hclge_rm_vport_all_vlan_table(vport, false);
9012 		return hclge_set_vlan_filter_hw(hdev,
9013 						 htons(new_info->vlan_proto),
9014 						 vport->vport_id,
9015 						 new_info->vlan_tag,
9016 						 false);
9017 	}
9018 
9019 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9020 				       vport->vport_id, old_info->vlan_tag,
9021 				       true);
9022 	if (ret)
9023 		return ret;
9024 
9025 	return hclge_add_vport_all_vlan_table(vport);
9026 }
9027 
9028 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9029 				    struct hclge_vlan_info *vlan_info)
9030 {
9031 	struct hnae3_handle *nic = &vport->nic;
9032 	struct hclge_vlan_info *old_vlan_info;
9033 	struct hclge_dev *hdev = vport->back;
9034 	int ret;
9035 
9036 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9037 
9038 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9039 	if (ret)
9040 		return ret;
9041 
9042 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9043 		/* add new VLAN tag */
9044 		ret = hclge_set_vlan_filter_hw(hdev,
9045 					       htons(vlan_info->vlan_proto),
9046 					       vport->vport_id,
9047 					       vlan_info->vlan_tag,
9048 					       false);
9049 		if (ret)
9050 			return ret;
9051 
9052 		/* remove old VLAN tag */
9053 		ret = hclge_set_vlan_filter_hw(hdev,
9054 					       htons(old_vlan_info->vlan_proto),
9055 					       vport->vport_id,
9056 					       old_vlan_info->vlan_tag,
9057 					       true);
9058 		if (ret)
9059 			return ret;
9060 
9061 		goto update;
9062 	}
9063 
9064 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9065 					       old_vlan_info);
9066 	if (ret)
9067 		return ret;
9068 
9069 	/* update state only when disabling/enabling port based VLAN */
9070 	vport->port_base_vlan_cfg.state = state;
9071 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9072 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9073 	else
9074 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9075 
9076 update:
9077 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9078 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9079 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9080 
9081 	return 0;
9082 }
9083 
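/* Map the requested vlan against the current port based VLAN state:
 * - currently DISABLE: vlan 0 -> NOCHANGE, non-zero vlan -> ENABLE
 * - currently ENABLE:  vlan 0 -> DISABLE, same vlan -> NOCHANGE,
 *                      different vlan -> MODIFY
 */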
9084 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9085 					  enum hnae3_port_base_vlan_state state,
9086 					  u16 vlan)
9087 {
9088 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9089 		if (!vlan)
9090 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9091 		else
9092 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9093 	} else {
9094 		if (!vlan)
9095 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9096 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9097 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9098 		else
9099 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9100 	}
9101 }
9102 
9103 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9104 				    u16 vlan, u8 qos, __be16 proto)
9105 {
9106 	struct hclge_vport *vport = hclge_get_vport(handle);
9107 	struct hclge_dev *hdev = vport->back;
9108 	struct hclge_vlan_info vlan_info;
9109 	u16 state;
9110 	int ret;
9111 
9112 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9113 		return -EOPNOTSUPP;
9114 
9115 	vport = hclge_get_vf_vport(hdev, vfid);
9116 	if (!vport)
9117 		return -EINVAL;
9118 
9119 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9120 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9121 		return -EINVAL;
9122 	if (proto != htons(ETH_P_8021Q))
9123 		return -EPROTONOSUPPORT;
9124 
9125 	state = hclge_get_port_base_vlan_state(vport,
9126 					       vport->port_base_vlan_cfg.state,
9127 					       vlan);
9128 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9129 		return 0;
9130 
9131 	vlan_info.vlan_tag = vlan;
9132 	vlan_info.qos = qos;
9133 	vlan_info.vlan_proto = ntohs(proto);
9134 
9135 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9136 		return hclge_update_port_base_vlan_cfg(vport, state,
9137 						       &vlan_info);
9138 	} else {
9139 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9140 							vport->vport_id, state,
9141 							vlan, qos,
9142 							ntohs(proto));
9143 		return ret;
9144 	}
9145 }
9146 
9147 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9148 {
9149 	struct hclge_vlan_info *vlan_info;
9150 	struct hclge_vport *vport;
9151 	int ret;
9152 	int vf;
9153 
9154 	/* clear the port based vlan for all VFs */
9155 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9156 		vport = &hdev->vport[vf];
9157 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9158 
9159 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9160 					       vport->vport_id,
9161 					       vlan_info->vlan_tag, true);
9162 		if (ret)
9163 			dev_err(&hdev->pdev->dev,
9164 				"failed to clear vf vlan for vf%d, ret = %d\n",
9165 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9166 	}
9167 }
9168 
9169 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9170 			  u16 vlan_id, bool is_kill)
9171 {
9172 	struct hclge_vport *vport = hclge_get_vport(handle);
9173 	struct hclge_dev *hdev = vport->back;
9174 	bool writen_to_tbl = false;
9175 	int ret = 0;
9176 
9177 	/* When the device is resetting or the reset has failed, firmware
9178 	 * cannot handle the mailbox. Just record the vlan id and remove it
9179 	 * after the reset finishes.
9180 	 */
9181 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9182 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9183 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9184 		return -EBUSY;
9185 	}
9186 
9187 	/* When port based vlan is enabled, it is used as the vlan filter
9188 	 * entry. In this case the vlan filter table is not updated when the
9189 	 * user adds a new vlan or removes an existing one; only the vport
9190 	 * vlan list is updated. The vlan ids in the list are written to the
9191 	 * vlan filter table once port based vlan is disabled.
9192 	 */
9193 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9194 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9195 					       vlan_id, is_kill);
9196 		writen_to_tbl = true;
9197 	}
9198 
9199 	if (!ret) {
9200 		if (!is_kill)
9201 			hclge_add_vport_vlan_table(vport, vlan_id,
9202 						   writen_to_tbl);
9203 		else if (is_kill && vlan_id != 0)
9204 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9205 	} else if (is_kill) {
9206 		/* When removing the hw vlan filter failed, record the vlan
9207 		 * id and try to remove it from hw later, to stay consistent
9208 		 * with the stack.
9209 		 */
9210 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9211 	}
9212 	return ret;
9213 }
9214 
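/* Retry vlan deletions that previously failed (recorded per vport in
 * vlan_del_fail_bmap), bounded by HCLGE_MAX_SYNC_COUNT entries per run.
 */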
9215 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9216 {
9217 #define HCLGE_MAX_SYNC_COUNT	60
9218 
9219 	int i, ret, sync_cnt = 0;
9220 	u16 vlan_id;
9221 
9222 	/* start from vport 1 for PF is always alive */
9223 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9224 		struct hclge_vport *vport = &hdev->vport[i];
9225 
9226 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9227 					 VLAN_N_VID);
9228 		while (vlan_id != VLAN_N_VID) {
9229 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9230 						       vport->vport_id, vlan_id,
9231 						       true);
9232 			if (ret && ret != -EINVAL)
9233 				return;
9234 
9235 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9236 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9237 
9238 			sync_cnt++;
9239 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9240 				return;
9241 
9242 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9243 						 VLAN_N_VID);
9244 		}
9245 	}
9246 }
9247 
9248 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9249 {
9250 	struct hclge_config_max_frm_size_cmd *req;
9251 	struct hclge_desc desc;
9252 
9253 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9254 
9255 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9256 	req->max_frm_size = cpu_to_le16(new_mps);
9257 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9258 
9259 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9260 }
9261 
9262 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9263 {
9264 	struct hclge_vport *vport = hclge_get_vport(handle);
9265 
9266 	return hclge_set_vport_mtu(vport, new_mtu);
9267 }
9268 
9269 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9270 {
9271 	struct hclge_dev *hdev = vport->back;
9272 	int i, max_frm_size, ret;
9273 
9274 	/* HW supports two layers of VLAN tags */
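	/* e.g. a standard 1500 byte MTU gives 1500 + 14 (ETH_HLEN) +
	 * 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526 bytes.
	 */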
9275 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9276 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9277 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9278 		return -EINVAL;
9279 
9280 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9281 	mutex_lock(&hdev->vport_lock);
9282 	/* VF's mps must fit within hdev->mps */
9283 	if (vport->vport_id && max_frm_size > hdev->mps) {
9284 		mutex_unlock(&hdev->vport_lock);
9285 		return -EINVAL;
9286 	} else if (vport->vport_id) {
9287 		vport->mps = max_frm_size;
9288 		mutex_unlock(&hdev->vport_lock);
9289 		return 0;
9290 	}
9291 
9292 	/* PF's mps must not be smaller than any VF's mps */
9293 	for (i = 1; i < hdev->num_alloc_vport; i++)
9294 		if (max_frm_size < hdev->vport[i].mps) {
9295 			mutex_unlock(&hdev->vport_lock);
9296 			return -EINVAL;
9297 		}
9298 
9299 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9300 
9301 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9302 	if (ret) {
9303 		dev_err(&hdev->pdev->dev,
9304 			"Change mtu fail, ret =%d\n", ret);
9305 		goto out;
9306 	}
9307 
9308 	hdev->mps = max_frm_size;
9309 	vport->mps = max_frm_size;
9310 
9311 	ret = hclge_buffer_alloc(hdev);
9312 	if (ret)
9313 		dev_err(&hdev->pdev->dev,
9314 			"Allocate buffer fail, ret =%d\n", ret);
9315 
9316 out:
9317 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9318 	mutex_unlock(&hdev->vport_lock);
9319 	return ret;
9320 }
9321 
9322 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9323 				    bool enable)
9324 {
9325 	struct hclge_reset_tqp_queue_cmd *req;
9326 	struct hclge_desc desc;
9327 	int ret;
9328 
9329 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9330 
9331 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9332 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9333 	if (enable)
9334 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9335 
9336 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9337 	if (ret) {
9338 		dev_err(&hdev->pdev->dev,
9339 			"Send tqp reset cmd error, status =%d\n", ret);
9340 		return ret;
9341 	}
9342 
9343 	return 0;
9344 }
9345 
9346 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9347 {
9348 	struct hclge_reset_tqp_queue_cmd *req;
9349 	struct hclge_desc desc;
9350 	int ret;
9351 
9352 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9353 
9354 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9355 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9356 
9357 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9358 	if (ret) {
9359 		dev_err(&hdev->pdev->dev,
9360 			"Get reset status error, status =%d\n", ret);
9361 		return ret;
9362 	}
9363 
9364 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9365 }
9366 
9367 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9368 {
9369 	struct hnae3_queue *queue;
9370 	struct hclge_tqp *tqp;
9371 
9372 	queue = handle->kinfo.tqp[queue_id];
9373 	tqp = container_of(queue, struct hclge_tqp, q);
9374 
9375 	return tqp->index;
9376 }
9377 
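/* Reset a single TQP: disable the queue, request the reset through
 * firmware, poll the ready status up to HCLGE_TQP_RESET_TRY_TIMES times,
 * then deassert the reset.
 */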
9378 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9379 {
9380 	struct hclge_vport *vport = hclge_get_vport(handle);
9381 	struct hclge_dev *hdev = vport->back;
9382 	int reset_try_times = 0;
9383 	int reset_status;
9384 	u16 queue_gid;
9385 	int ret;
9386 
9387 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9388 
9389 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9390 	if (ret) {
9391 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9392 		return ret;
9393 	}
9394 
9395 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9396 	if (ret) {
9397 		dev_err(&hdev->pdev->dev,
9398 			"Send reset tqp cmd fail, ret = %d\n", ret);
9399 		return ret;
9400 	}
9401 
9402 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9403 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9404 		if (reset_status)
9405 			break;
9406 
9407 		/* Wait for tqp hw reset */
9408 		usleep_range(1000, 1200);
9409 	}
9410 
9411 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9412 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9413 		return ret;
9414 	}
9415 
9416 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9417 	if (ret)
9418 		dev_err(&hdev->pdev->dev,
9419 			"Deassert the soft reset fail, ret = %d\n", ret);
9420 
9421 	return ret;
9422 }
9423 
9424 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9425 {
9426 	struct hnae3_handle *handle = &vport->nic;
9427 	struct hclge_dev *hdev = vport->back;
9428 	int reset_try_times = 0;
9429 	int reset_status;
9430 	u16 queue_gid;
9431 	int ret;
9432 
9433 	if (queue_id >= handle->kinfo.num_tqps) {
9434 		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9435 			 queue_id);
9436 		return;
9437 	}
9438 
9439 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9440 
9441 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9442 	if (ret) {
9443 		dev_warn(&hdev->pdev->dev,
9444 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9445 		return;
9446 	}
9447 
9448 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9449 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9450 		if (reset_status)
9451 			break;
9452 
9453 		/* Wait for tqp hw reset */
9454 		usleep_range(1000, 1200);
9455 	}
9456 
9457 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9458 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9459 		return;
9460 	}
9461 
9462 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9463 	if (ret)
9464 		dev_warn(&hdev->pdev->dev,
9465 			 "Deassert the soft reset fail, ret = %d\n", ret);
9466 }
9467 
9468 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9469 {
9470 	struct hclge_vport *vport = hclge_get_vport(handle);
9471 	struct hclge_dev *hdev = vport->back;
9472 
9473 	return hdev->fw_version;
9474 }
9475 
9476 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9477 {
9478 	struct phy_device *phydev = hdev->hw.mac.phydev;
9479 
9480 	if (!phydev)
9481 		return;
9482 
9483 	phy_set_asym_pause(phydev, rx_en, tx_en);
9484 }
9485 
9486 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9487 {
9488 	int ret;
9489 
9490 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9491 		return 0;
9492 
9493 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9494 	if (ret)
9495 		dev_err(&hdev->pdev->dev,
9496 			"configure pauseparam error, ret = %d.\n", ret);
9497 
9498 	return ret;
9499 }
9500 
9501 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9502 {
9503 	struct phy_device *phydev = hdev->hw.mac.phydev;
9504 	u16 remote_advertising = 0;
9505 	u16 local_advertising;
9506 	u32 rx_pause, tx_pause;
9507 	u8 flowctl;
9508 
9509 	if (!phydev->link || !phydev->autoneg)
9510 		return 0;
9511 
9512 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9513 
9514 	if (phydev->pause)
9515 		remote_advertising = LPA_PAUSE_CAP;
9516 
9517 	if (phydev->asym_pause)
9518 		remote_advertising |= LPA_PAUSE_ASYM;
9519 
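	/* Resolve the negotiated pause configuration from the local and
	 * link partner advertisements; pause is only honoured for full
	 * duplex links (both directions are cleared for half duplex below).
	 */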
9520 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9521 					   remote_advertising);
9522 	tx_pause = flowctl & FLOW_CTRL_TX;
9523 	rx_pause = flowctl & FLOW_CTRL_RX;
9524 
9525 	if (phydev->duplex == HCLGE_MAC_HALF) {
9526 		tx_pause = 0;
9527 		rx_pause = 0;
9528 	}
9529 
9530 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9531 }
9532 
9533 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9534 				 u32 *rx_en, u32 *tx_en)
9535 {
9536 	struct hclge_vport *vport = hclge_get_vport(handle);
9537 	struct hclge_dev *hdev = vport->back;
9538 	struct phy_device *phydev = hdev->hw.mac.phydev;
9539 
9540 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9541 
9542 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9543 		*rx_en = 0;
9544 		*tx_en = 0;
9545 		return;
9546 	}
9547 
9548 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9549 		*rx_en = 1;
9550 		*tx_en = 0;
9551 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9552 		*tx_en = 1;
9553 		*rx_en = 0;
9554 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9555 		*rx_en = 1;
9556 		*tx_en = 1;
9557 	} else {
9558 		*rx_en = 0;
9559 		*tx_en = 0;
9560 	}
9561 }
9562 
9563 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9564 					 u32 rx_en, u32 tx_en)
9565 {
9566 	if (rx_en && tx_en)
9567 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9568 	else if (rx_en && !tx_en)
9569 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9570 	else if (!rx_en && tx_en)
9571 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9572 	else
9573 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9574 
9575 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9576 }
9577 
9578 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9579 				u32 rx_en, u32 tx_en)
9580 {
9581 	struct hclge_vport *vport = hclge_get_vport(handle);
9582 	struct hclge_dev *hdev = vport->back;
9583 	struct phy_device *phydev = hdev->hw.mac.phydev;
9584 	u32 fc_autoneg;
9585 
9586 	if (phydev) {
9587 		fc_autoneg = hclge_get_autoneg(handle);
9588 		if (auto_neg != fc_autoneg) {
9589 			dev_info(&hdev->pdev->dev,
9590 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9591 			return -EOPNOTSUPP;
9592 		}
9593 	}
9594 
9595 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9596 		dev_info(&hdev->pdev->dev,
9597 			 "Priority flow control enabled. Cannot set link flow control.\n");
9598 		return -EOPNOTSUPP;
9599 	}
9600 
9601 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9602 
9603 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9604 
9605 	if (!auto_neg)
9606 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9607 
9608 	if (phydev)
9609 		return phy_start_aneg(phydev);
9610 
9611 	return -EOPNOTSUPP;
9612 }
9613 
9614 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9615 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9616 {
9617 	struct hclge_vport *vport = hclge_get_vport(handle);
9618 	struct hclge_dev *hdev = vport->back;
9619 
9620 	if (speed)
9621 		*speed = hdev->hw.mac.speed;
9622 	if (duplex)
9623 		*duplex = hdev->hw.mac.duplex;
9624 	if (auto_neg)
9625 		*auto_neg = hdev->hw.mac.autoneg;
9626 }
9627 
9628 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9629 				 u8 *module_type)
9630 {
9631 	struct hclge_vport *vport = hclge_get_vport(handle);
9632 	struct hclge_dev *hdev = vport->back;
9633 
9634 	/* When the nic is down, the service task is not running and does not
9635 	 * refresh the port information every second. Query the port info
9636 	 * before returning the media type to ensure the information is correct.
9637 	 */
9638 	hclge_update_port_info(hdev);
9639 
9640 	if (media_type)
9641 		*media_type = hdev->hw.mac.media_type;
9642 
9643 	if (module_type)
9644 		*module_type = hdev->hw.mac.module_type;
9645 }
9646 
9647 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9648 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9649 {
9650 	struct hclge_vport *vport = hclge_get_vport(handle);
9651 	struct hclge_dev *hdev = vport->back;
9652 	struct phy_device *phydev = hdev->hw.mac.phydev;
9653 	int mdix_ctrl, mdix, is_resolved;
9654 	unsigned int retval;
9655 
9656 	if (!phydev) {
9657 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9658 		*tp_mdix = ETH_TP_MDI_INVALID;
9659 		return;
9660 	}
9661 
9662 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9663 
9664 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9665 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9666 				    HCLGE_PHY_MDIX_CTRL_S);
9667 
9668 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9669 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9670 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9671 
9672 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9673 
9674 	switch (mdix_ctrl) {
9675 	case 0x0:
9676 		*tp_mdix_ctrl = ETH_TP_MDI;
9677 		break;
9678 	case 0x1:
9679 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9680 		break;
9681 	case 0x3:
9682 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9683 		break;
9684 	default:
9685 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9686 		break;
9687 	}
9688 
9689 	if (!is_resolved)
9690 		*tp_mdix = ETH_TP_MDI_INVALID;
9691 	else if (mdix)
9692 		*tp_mdix = ETH_TP_MDI_X;
9693 	else
9694 		*tp_mdix = ETH_TP_MDI;
9695 }
9696 
9697 static void hclge_info_show(struct hclge_dev *hdev)
9698 {
9699 	struct device *dev = &hdev->pdev->dev;
9700 
9701 	dev_info(dev, "PF info begin:\n");
9702 
9703 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9704 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9705 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9706 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9707 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9708 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9709 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9710 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9711 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9712 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9713 	dev_info(dev, "This is %s PF\n",
9714 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9715 	dev_info(dev, "DCB %s\n",
9716 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9717 	dev_info(dev, "MQPRIO %s\n",
9718 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9719 
9720 	dev_info(dev, "PF info end.\n");
9721 }
9722 
9723 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9724 					  struct hclge_vport *vport)
9725 {
9726 	struct hnae3_client *client = vport->nic.client;
9727 	struct hclge_dev *hdev = ae_dev->priv;
9728 	int rst_cnt = hdev->rst_stats.reset_cnt;
9729 	int ret;
9730 
9731 	ret = client->ops->init_instance(&vport->nic);
9732 	if (ret)
9733 		return ret;
9734 
9735 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9736 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9737 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9738 		ret = -EBUSY;
9739 		goto init_nic_err;
9740 	}
9741 
9742 	/* Enable nic hw error interrupts */
9743 	ret = hclge_config_nic_hw_error(hdev, true);
9744 	if (ret) {
9745 		dev_err(&ae_dev->pdev->dev,
9746 			"fail(%d) to enable hw error interrupts\n", ret);
9747 		goto init_nic_err;
9748 	}
9749 
9750 	hnae3_set_client_init_flag(client, ae_dev, 1);
9751 
9752 	if (netif_msg_drv(&hdev->vport->nic))
9753 		hclge_info_show(hdev);
9754 
9755 	return ret;
9756 
9757 init_nic_err:
9758 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9759 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9760 		msleep(HCLGE_WAIT_RESET_DONE);
9761 
9762 	client->ops->uninit_instance(&vport->nic, 0);
9763 
9764 	return ret;
9765 }
9766 
9767 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9768 					   struct hclge_vport *vport)
9769 {
9770 	struct hclge_dev *hdev = ae_dev->priv;
9771 	struct hnae3_client *client;
9772 	int rst_cnt;
9773 	int ret;
9774 
9775 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9776 	    !hdev->nic_client)
9777 		return 0;
9778 
9779 	client = hdev->roce_client;
9780 	ret = hclge_init_roce_base_info(vport);
9781 	if (ret)
9782 		return ret;
9783 
9784 	rst_cnt = hdev->rst_stats.reset_cnt;
9785 	ret = client->ops->init_instance(&vport->roce);
9786 	if (ret)
9787 		return ret;
9788 
9789 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9790 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9791 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9792 		ret = -EBUSY;
9793 		goto init_roce_err;
9794 	}
9795 
9796 	/* Enable roce ras interrupts */
9797 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9798 	if (ret) {
9799 		dev_err(&ae_dev->pdev->dev,
9800 			"fail(%d) to enable roce ras interrupts\n", ret);
9801 		goto init_roce_err;
9802 	}
9803 
9804 	hnae3_set_client_init_flag(client, ae_dev, 1);
9805 
9806 	return 0;
9807 
9808 init_roce_err:
9809 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9810 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9811 		msleep(HCLGE_WAIT_RESET_DONE);
9812 
9813 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9814 
9815 	return ret;
9816 }
9817 
9818 static int hclge_init_client_instance(struct hnae3_client *client,
9819 				      struct hnae3_ae_dev *ae_dev)
9820 {
9821 	struct hclge_dev *hdev = ae_dev->priv;
9822 	struct hclge_vport *vport;
9823 	int i, ret;
9824 
9825 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9826 		vport = &hdev->vport[i];
9827 
9828 		switch (client->type) {
9829 		case HNAE3_CLIENT_KNIC:
9830 			hdev->nic_client = client;
9831 			vport->nic.client = client;
9832 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9833 			if (ret)
9834 				goto clear_nic;
9835 
9836 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9837 			if (ret)
9838 				goto clear_roce;
9839 
9840 			break;
9841 		case HNAE3_CLIENT_ROCE:
9842 			if (hnae3_dev_roce_supported(hdev)) {
9843 				hdev->roce_client = client;
9844 				vport->roce.client = client;
9845 			}
9846 
9847 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9848 			if (ret)
9849 				goto clear_roce;
9850 
9851 			break;
9852 		default:
9853 			return -EINVAL;
9854 		}
9855 	}
9856 
9857 	return 0;
9858 
9859 clear_nic:
9860 	hdev->nic_client = NULL;
9861 	vport->nic.client = NULL;
9862 	return ret;
9863 clear_roce:
9864 	hdev->roce_client = NULL;
9865 	vport->roce.client = NULL;
9866 	return ret;
9867 }
9868 
9869 static void hclge_uninit_client_instance(struct hnae3_client *client,
9870 					 struct hnae3_ae_dev *ae_dev)
9871 {
9872 	struct hclge_dev *hdev = ae_dev->priv;
9873 	struct hclge_vport *vport;
9874 	int i;
9875 
9876 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9877 		vport = &hdev->vport[i];
9878 		if (hdev->roce_client) {
9879 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9880 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9881 				msleep(HCLGE_WAIT_RESET_DONE);
9882 
9883 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9884 								0);
9885 			hdev->roce_client = NULL;
9886 			vport->roce.client = NULL;
9887 		}
9888 		if (client->type == HNAE3_CLIENT_ROCE)
9889 			return;
9890 		if (hdev->nic_client && client->ops->uninit_instance) {
9891 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9892 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9893 				msleep(HCLGE_WAIT_RESET_DONE);
9894 
9895 			client->ops->uninit_instance(&vport->nic, 0);
9896 			hdev->nic_client = NULL;
9897 			vport->nic.client = NULL;
9898 		}
9899 	}
9900 }
9901 
9902 static int hclge_pci_init(struct hclge_dev *hdev)
9903 {
9904 	struct pci_dev *pdev = hdev->pdev;
9905 	struct hclge_hw *hw;
9906 	int ret;
9907 
9908 	ret = pci_enable_device(pdev);
9909 	if (ret) {
9910 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9911 		return ret;
9912 	}
9913 
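	/* Prefer 64-bit DMA addressing and fall back to a 32-bit mask if
	 * the platform cannot provide it.
	 */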
9914 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9915 	if (ret) {
9916 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9917 		if (ret) {
9918 			dev_err(&pdev->dev,
9919 				"can't set consistent PCI DMA");
9920 			goto err_disable_device;
9921 		}
9922 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9923 	}
9924 
9925 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9926 	if (ret) {
9927 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9928 		goto err_disable_device;
9929 	}
9930 
9931 	pci_set_master(pdev);
9932 	hw = &hdev->hw;
9933 	hw->io_base = pcim_iomap(pdev, 2, 0);
9934 	if (!hw->io_base) {
9935 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9936 		ret = -ENOMEM;
9937 		goto err_clr_master;
9938 	}
9939 
9940 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9941 
9942 	return 0;
9943 err_clr_master:
9944 	pci_clear_master(pdev);
9945 	pci_release_regions(pdev);
9946 err_disable_device:
9947 	pci_disable_device(pdev);
9948 
9949 	return ret;
9950 }
9951 
9952 static void hclge_pci_uninit(struct hclge_dev *hdev)
9953 {
9954 	struct pci_dev *pdev = hdev->pdev;
9955 
9956 	pcim_iounmap(pdev, hdev->hw.io_base);
9957 	pci_free_irq_vectors(pdev);
9958 	pci_clear_master(pdev);
9959 	pci_release_mem_regions(pdev);
9960 	pci_disable_device(pdev);
9961 }
9962 
9963 static void hclge_state_init(struct hclge_dev *hdev)
9964 {
9965 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9966 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9967 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9968 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9969 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9970 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9971 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9972 }
9973 
9974 static void hclge_state_uninit(struct hclge_dev *hdev)
9975 {
9976 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9977 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9978 
9979 	if (hdev->reset_timer.function)
9980 		del_timer_sync(&hdev->reset_timer);
9981 	if (hdev->service_task.work.func)
9982 		cancel_delayed_work_sync(&hdev->service_task);
9983 }
9984 
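/* Prepare for an FLR: take the reset semaphore, run the reset preparation
 * and retry up to HCLGE_FLR_RETRY_CNT times while a reset is still pending,
 * then mask the misc vector and mark the command queue disabled until
 * hclge_flr_done() runs.
 */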
9985 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9986 {
9987 #define HCLGE_FLR_RETRY_WAIT_MS	500
9988 #define HCLGE_FLR_RETRY_CNT	5
9989 
9990 	struct hclge_dev *hdev = ae_dev->priv;
9991 	int retry_cnt = 0;
9992 	int ret;
9993 
9994 retry:
9995 	down(&hdev->reset_sem);
9996 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9997 	hdev->reset_type = HNAE3_FLR_RESET;
9998 	ret = hclge_reset_prepare(hdev);
9999 	if (ret || hdev->reset_pending) {
10000 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10001 			ret);
10002 		if (hdev->reset_pending ||
10003 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10004 			dev_err(&hdev->pdev->dev,
10005 				"reset_pending:0x%lx, retry_cnt:%d\n",
10006 				hdev->reset_pending, retry_cnt);
10007 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10008 			up(&hdev->reset_sem);
10009 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10010 			goto retry;
10011 		}
10012 	}
10013 
10014 	/* disable misc vector before FLR done */
10015 	hclge_enable_vector(&hdev->misc_vector, false);
10016 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10017 	hdev->rst_stats.flr_rst_cnt++;
10018 }
10019 
10020 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10021 {
10022 	struct hclge_dev *hdev = ae_dev->priv;
10023 	int ret;
10024 
10025 	hclge_enable_vector(&hdev->misc_vector, true);
10026 
10027 	ret = hclge_reset_rebuild(hdev);
10028 	if (ret)
10029 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10030 
10031 	hdev->reset_type = HNAE3_NONE_RESET;
10032 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10033 	up(&hdev->reset_sem);
10034 }
10035 
10036 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10037 {
10038 	u16 i;
10039 
10040 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10041 		struct hclge_vport *vport = &hdev->vport[i];
10042 		int ret;
10043 
10044 		 /* Send cmd to clear VF's FUNC_RST_ING */
10045 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10046 		if (ret)
10047 			dev_warn(&hdev->pdev->dev,
10048 				 "clear vf(%u) rst failed %d!\n",
10049 				 vport->vport_id, ret);
10050 	}
10051 }
10052 
10053 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10054 {
10055 	struct hclge_desc desc;
10056 	int ret;
10057 
10058 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10059 
10060 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10061 	/* This new command is only supported by new firmware; it will
10062 	 * fail with older firmware. Error value -EOPNOTSUPP can only be
10063 	 * returned by older firmware running this command, so to keep the
10064 	 * code backward compatible we override this value and return
10065 	 * success.
10066 	 */
10067 	if (ret && ret != -EOPNOTSUPP) {
10068 		dev_err(&hdev->pdev->dev,
10069 			"failed to clear hw resource, ret = %d\n", ret);
10070 		return ret;
10071 	}
10072 	return 0;
10073 }
10074 
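/* Full PF initialization path: PCI and command queue bring-up, capability
 * and device spec queries, MSI-X/TQP/vport allocation, MAC, VLAN, TM, RSS
 * and flow director configuration, followed by misc vector enablement and
 * scheduling of the periodic service task.
 */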
10075 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10076 {
10077 	struct pci_dev *pdev = ae_dev->pdev;
10078 	struct hclge_dev *hdev;
10079 	int ret;
10080 
10081 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10082 	if (!hdev)
10083 		return -ENOMEM;
10084 
10085 	hdev->pdev = pdev;
10086 	hdev->ae_dev = ae_dev;
10087 	hdev->reset_type = HNAE3_NONE_RESET;
10088 	hdev->reset_level = HNAE3_FUNC_RESET;
10089 	ae_dev->priv = hdev;
10090 
10091 	/* HW supports 2-layer VLAN */
10092 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10093 
10094 	mutex_init(&hdev->vport_lock);
10095 	spin_lock_init(&hdev->fd_rule_lock);
10096 	sema_init(&hdev->reset_sem, 1);
10097 
10098 	ret = hclge_pci_init(hdev);
10099 	if (ret)
10100 		goto out;
10101 
10102 	/* Initialize the firmware command queue */
10103 	ret = hclge_cmd_queue_init(hdev);
10104 	if (ret)
10105 		goto err_pci_uninit;
10106 
10107 	/* Initialize the firmware command */
10108 	ret = hclge_cmd_init(hdev);
10109 	if (ret)
10110 		goto err_cmd_uninit;
10111 
10112 	ret = hclge_clear_hw_resource(hdev);
10113 	if (ret)
10114 		goto err_cmd_uninit;
10115 
10116 	ret = hclge_get_cap(hdev);
10117 	if (ret)
10118 		goto err_cmd_uninit;
10119 
10120 	ret = hclge_query_dev_specs(hdev);
10121 	if (ret) {
10122 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10123 			ret);
10124 		goto err_cmd_uninit;
10125 	}
10126 
10127 	ret = hclge_configure(hdev);
10128 	if (ret) {
10129 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10130 		goto err_cmd_uninit;
10131 	}
10132 
10133 	ret = hclge_init_msi(hdev);
10134 	if (ret) {
10135 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10136 		goto err_cmd_uninit;
10137 	}
10138 
10139 	ret = hclge_misc_irq_init(hdev);
10140 	if (ret)
10141 		goto err_msi_uninit;
10142 
10143 	ret = hclge_alloc_tqps(hdev);
10144 	if (ret) {
10145 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10146 		goto err_msi_irq_uninit;
10147 	}
10148 
10149 	ret = hclge_alloc_vport(hdev);
10150 	if (ret)
10151 		goto err_msi_irq_uninit;
10152 
10153 	ret = hclge_map_tqp(hdev);
10154 	if (ret)
10155 		goto err_msi_irq_uninit;
10156 
10157 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10158 		ret = hclge_mac_mdio_config(hdev);
10159 		if (ret)
10160 			goto err_msi_irq_uninit;
10161 	}
10162 
10163 	ret = hclge_init_umv_space(hdev);
10164 	if (ret)
10165 		goto err_mdiobus_unreg;
10166 
10167 	ret = hclge_mac_init(hdev);
10168 	if (ret) {
10169 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10170 		goto err_mdiobus_unreg;
10171 	}
10172 
10173 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10174 	if (ret) {
10175 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10176 		goto err_mdiobus_unreg;
10177 	}
10178 
10179 	ret = hclge_config_gro(hdev, true);
10180 	if (ret)
10181 		goto err_mdiobus_unreg;
10182 
10183 	ret = hclge_init_vlan_config(hdev);
10184 	if (ret) {
10185 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10186 		goto err_mdiobus_unreg;
10187 	}
10188 
10189 	ret = hclge_tm_schd_init(hdev);
10190 	if (ret) {
10191 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10192 		goto err_mdiobus_unreg;
10193 	}
10194 
10195 	hclge_rss_init_cfg(hdev);
10196 	ret = hclge_rss_init_hw(hdev);
10197 	if (ret) {
10198 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10199 		goto err_mdiobus_unreg;
10200 	}
10201 
10202 	ret = init_mgr_tbl(hdev);
10203 	if (ret) {
10204 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10205 		goto err_mdiobus_unreg;
10206 	}
10207 
10208 	ret = hclge_init_fd_config(hdev);
10209 	if (ret) {
10210 		dev_err(&pdev->dev,
10211 			"fd table init fail, ret=%d\n", ret);
10212 		goto err_mdiobus_unreg;
10213 	}
10214 
10215 	INIT_KFIFO(hdev->mac_tnl_log);
10216 
10217 	hclge_dcb_ops_set(hdev);
10218 
10219 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10220 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10221 
10222 	/* Set up affinity after the service timer setup because add_timer_on
10223 	 * is called in the affinity notify callback.
10224 	 */
10225 	hclge_misc_affinity_setup(hdev);
10226 
10227 	hclge_clear_all_event_cause(hdev);
10228 	hclge_clear_resetting_state(hdev);
10229 
10230 	/* Log and clear the hw errors that have already occurred */
10231 	hclge_handle_all_hns_hw_errors(ae_dev);
10232 
10233 	/* request a delayed reset for error recovery, because an immediate
10234 	 * global reset on a PF would affect pending initialization of other PFs
10235 	 */
10236 	if (ae_dev->hw_err_reset_req) {
10237 		enum hnae3_reset_type reset_level;
10238 
10239 		reset_level = hclge_get_reset_level(ae_dev,
10240 						    &ae_dev->hw_err_reset_req);
10241 		hclge_set_def_reset_request(ae_dev, reset_level);
10242 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10243 	}
10244 
10245 	/* Enable MISC vector(vector0) */
10246 	hclge_enable_vector(&hdev->misc_vector, true);
10247 
10248 	hclge_state_init(hdev);
10249 	hdev->last_reset_time = jiffies;
10250 
10251 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10252 		 HCLGE_DRIVER_NAME);
10253 
10254 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10255 
10256 	return 0;
10257 
10258 err_mdiobus_unreg:
10259 	if (hdev->hw.mac.phydev)
10260 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10261 err_msi_irq_uninit:
10262 	hclge_misc_irq_uninit(hdev);
10263 err_msi_uninit:
10264 	pci_free_irq_vectors(pdev);
10265 err_cmd_uninit:
10266 	hclge_cmd_uninit(hdev);
10267 err_pci_uninit:
10268 	pcim_iounmap(pdev, hdev->hw.io_base);
10269 	pci_clear_master(pdev);
10270 	pci_release_regions(pdev);
10271 	pci_disable_device(pdev);
10272 out:
10273 	mutex_destroy(&hdev->vport_lock);
10274 	return ret;
10275 }
10276 
10277 static void hclge_stats_clear(struct hclge_dev *hdev)
10278 {
10279 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10280 }
10281 
10282 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10283 {
10284 	return hclge_config_switch_param(hdev, vf, enable,
10285 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10286 }
10287 
10288 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10289 {
10290 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10291 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10292 					  enable, vf);
10293 }
10294 
10295 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10296 {
10297 	int ret;
10298 
10299 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10300 	if (ret) {
10301 		dev_err(&hdev->pdev->dev,
10302 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10303 			vf, enable ? "on" : "off", ret);
10304 		return ret;
10305 	}
10306 
10307 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10308 	if (ret)
10309 		dev_err(&hdev->pdev->dev,
10310 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10311 			vf, enable ? "on" : "off", ret);
10312 
10313 	return ret;
10314 }
10315 
10316 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10317 				 bool enable)
10318 {
10319 	struct hclge_vport *vport = hclge_get_vport(handle);
10320 	struct hclge_dev *hdev = vport->back;
10321 	u32 new_spoofchk = enable ? 1 : 0;
10322 	int ret;
10323 
10324 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10325 		return -EOPNOTSUPP;
10326 
10327 	vport = hclge_get_vf_vport(hdev, vf);
10328 	if (!vport)
10329 		return -EINVAL;
10330 
10331 	if (vport->vf_info.spoofchk == new_spoofchk)
10332 		return 0;
10333 
10334 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10335 		dev_warn(&hdev->pdev->dev,
10336 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10337 			 vf);
10338 	else if (enable && hclge_is_umv_space_full(vport, true))
10339 		dev_warn(&hdev->pdev->dev,
10340 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10341 			 vf);
10342 
10343 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10344 	if (ret)
10345 		return ret;
10346 
10347 	vport->vf_info.spoofchk = new_spoofchk;
10348 	return 0;
10349 }
10350 
10351 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10352 {
10353 	struct hclge_vport *vport = hdev->vport;
10354 	int ret;
10355 	int i;
10356 
10357 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10358 		return 0;
10359 
10360 	/* resume the vf spoof check state after reset */
10361 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10362 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10363 					       vport->vf_info.spoofchk);
10364 		if (ret)
10365 			return ret;
10366 
10367 		vport++;
10368 	}
10369 
10370 	return 0;
10371 }
10372 
10373 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10374 {
10375 	struct hclge_vport *vport = hclge_get_vport(handle);
10376 	struct hclge_dev *hdev = vport->back;
10377 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10378 	u32 new_trusted = enable ? 1 : 0;
10379 	bool en_bc_pmc;
10380 	int ret;
10381 
10382 	vport = hclge_get_vf_vport(hdev, vf);
10383 	if (!vport)
10384 		return -EINVAL;
10385 
10386 	if (vport->vf_info.trusted == new_trusted)
10387 		return 0;
10388 
10389 	/* Disable promisc mode for VF if it is not trusted any more. */
10390 	if (!enable && vport->vf_info.promisc_enable) {
10391 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10392 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10393 						   en_bc_pmc);
10394 		if (ret)
10395 			return ret;
10396 		vport->vf_info.promisc_enable = 0;
10397 		hclge_inform_vf_promisc_info(vport);
10398 	}
10399 
10400 	vport->vf_info.trusted = new_trusted;
10401 
10402 	return 0;
10403 }
10404 
10405 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10406 {
10407 	int ret;
10408 	int vf;
10409 
10410 	/* reset vf rate to default value */
10411 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10412 		struct hclge_vport *vport = &hdev->vport[vf];
10413 
10414 		vport->vf_info.max_tx_rate = 0;
10415 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10416 		if (ret)
10417 			dev_err(&hdev->pdev->dev,
10418 				"vf%d failed to reset to default, ret=%d\n",
10419 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10420 	}
10421 }
10422 
10423 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10424 				     int min_tx_rate, int max_tx_rate)
10425 {
10426 	if (min_tx_rate != 0 ||
10427 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10428 		dev_err(&hdev->pdev->dev,
10429 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10430 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10431 		return -EINVAL;
10432 	}
10433 
10434 	return 0;
10435 }
10436 
10437 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10438 			     int min_tx_rate, int max_tx_rate, bool force)
10439 {
10440 	struct hclge_vport *vport = hclge_get_vport(handle);
10441 	struct hclge_dev *hdev = vport->back;
10442 	int ret;
10443 
10444 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10445 	if (ret)
10446 		return ret;
10447 
10448 	vport = hclge_get_vf_vport(hdev, vf);
10449 	if (!vport)
10450 		return -EINVAL;
10451 
10452 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10453 		return 0;
10454 
10455 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10456 	if (ret)
10457 		return ret;
10458 
10459 	vport->vf_info.max_tx_rate = max_tx_rate;
10460 
10461 	return 0;
10462 }
10463 
10464 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10465 {
10466 	struct hnae3_handle *handle = &hdev->vport->nic;
10467 	struct hclge_vport *vport;
10468 	int ret;
10469 	int vf;
10470 
10471 	/* resume the vf max_tx_rate after reset */
10472 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10473 		vport = hclge_get_vf_vport(hdev, vf);
10474 		if (!vport)
10475 			return -EINVAL;
10476 
10477 		/* zero means max rate; after reset, the firmware has already set
10478 		 * it to max rate, so just continue.
10479 		 */
10480 		if (!vport->vf_info.max_tx_rate)
10481 			continue;
10482 
10483 		ret = hclge_set_vf_rate(handle, vf, 0,
10484 					vport->vf_info.max_tx_rate, true);
10485 		if (ret) {
10486 			dev_err(&hdev->pdev->dev,
10487 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10488 				vf, vport->vf_info.max_tx_rate, ret);
10489 			return ret;
10490 		}
10491 	}
10492 
10493 	return 0;
10494 }
10495 
10496 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10497 {
10498 	struct hclge_vport *vport = hdev->vport;
10499 	int i;
10500 
10501 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10502 		hclge_vport_stop(vport);
10503 		vport++;
10504 	}
10505 }
10506 
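/* Rebuild hardware state after a reset. IMP and global resets also lose the
 * VLAN and MAC (UMV) tables, so those are cleared here before the command
 * queue, MAC, VLAN, TM, RSS and flow director blocks are reinitialized and
 * the hw error interrupts are re-enabled.
 */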
10507 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10508 {
10509 	struct hclge_dev *hdev = ae_dev->priv;
10510 	struct pci_dev *pdev = ae_dev->pdev;
10511 	int ret;
10512 
10513 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10514 
10515 	hclge_stats_clear(hdev);
10516 	/* NOTE: a pf reset does not need to clear or restore the pf and vf
10517 	 * table entries, so do not clean the tables in memory here.
10518 	 */
10519 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10520 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10521 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10522 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10523 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10524 		hclge_reset_umv_space(hdev);
10525 	}
10526 
10527 	ret = hclge_cmd_init(hdev);
10528 	if (ret) {
10529 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10530 		return ret;
10531 	}
10532 
10533 	ret = hclge_map_tqp(hdev);
10534 	if (ret) {
10535 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10536 		return ret;
10537 	}
10538 
10539 	ret = hclge_mac_init(hdev);
10540 	if (ret) {
10541 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10542 		return ret;
10543 	}
10544 
10545 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10546 	if (ret) {
10547 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10548 		return ret;
10549 	}
10550 
10551 	ret = hclge_config_gro(hdev, true);
10552 	if (ret)
10553 		return ret;
10554 
10555 	ret = hclge_init_vlan_config(hdev);
10556 	if (ret) {
10557 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10558 		return ret;
10559 	}
10560 
10561 	ret = hclge_tm_init_hw(hdev, true);
10562 	if (ret) {
10563 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10564 		return ret;
10565 	}
10566 
10567 	ret = hclge_rss_init_hw(hdev);
10568 	if (ret) {
10569 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10570 		return ret;
10571 	}
10572 
10573 	ret = init_mgr_tbl(hdev);
10574 	if (ret) {
10575 		dev_err(&pdev->dev,
10576 			"failed to reinit manager table, ret = %d\n", ret);
10577 		return ret;
10578 	}
10579 
10580 	ret = hclge_init_fd_config(hdev);
10581 	if (ret) {
10582 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10583 		return ret;
10584 	}
10585 
10586 	/* Log and clear the hw errors that have already occurred */
10587 	hclge_handle_all_hns_hw_errors(ae_dev);
10588 
10589 	/* Re-enable the hw error interrupts because
10590 	 * the interrupts get disabled on global reset.
10591 	 */
10592 	ret = hclge_config_nic_hw_error(hdev, true);
10593 	if (ret) {
10594 		dev_err(&pdev->dev,
10595 			"fail(%d) to re-enable NIC hw error interrupts\n",
10596 			ret);
10597 		return ret;
10598 	}
10599 
10600 	if (hdev->roce_client) {
10601 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10602 		if (ret) {
10603 			dev_err(&pdev->dev,
10604 				"fail(%d) to re-enable roce ras interrupts\n",
10605 				ret);
10606 			return ret;
10607 		}
10608 	}
10609 
10610 	hclge_reset_vport_state(hdev);
10611 	ret = hclge_reset_vport_spoofchk(hdev);
10612 	if (ret)
10613 		return ret;
10614 
10615 	ret = hclge_resume_vf_rate(hdev);
10616 	if (ret)
10617 		return ret;
10618 
10619 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10620 		 HCLGE_DRIVER_NAME);
10621 
10622 	return 0;
10623 }
10624 
10625 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10626 {
10627 	struct hclge_dev *hdev = ae_dev->priv;
10628 	struct hclge_mac *mac = &hdev->hw.mac;
10629 
10630 	hclge_reset_vf_rate(hdev);
10631 	hclge_clear_vf_vlan(hdev);
10632 	hclge_misc_affinity_teardown(hdev);
10633 	hclge_state_uninit(hdev);
10634 	hclge_uninit_mac_table(hdev);
10635 
10636 	if (mac->phydev)
10637 		mdiobus_unregister(mac->mdio_bus);
10638 
10639 	/* Disable MISC vector(vector0) */
10640 	hclge_enable_vector(&hdev->misc_vector, false);
10641 	synchronize_irq(hdev->misc_vector.vector_irq);
10642 
10643 	/* Disable all hw interrupts */
10644 	hclge_config_mac_tnl_int(hdev, false);
10645 	hclge_config_nic_hw_error(hdev, false);
10646 	hclge_config_rocee_ras_interrupt(hdev, false);
10647 
10648 	hclge_cmd_uninit(hdev);
10649 	hclge_misc_irq_uninit(hdev);
10650 	hclge_pci_uninit(hdev);
10651 	mutex_destroy(&hdev->vport_lock);
10652 	hclge_uninit_vport_vlan_table(hdev);
10653 	ae_dev->priv = NULL;
10654 }
10655 
10656 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10657 {
10658 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10659 	struct hclge_vport *vport = hclge_get_vport(handle);
10660 	struct hclge_dev *hdev = vport->back;
10661 
10662 	return min_t(u32, hdev->rss_size_max,
10663 		     vport->alloc_tqps / kinfo->num_tc);
10664 }
10665 
10666 static void hclge_get_channels(struct hnae3_handle *handle,
10667 			       struct ethtool_channels *ch)
10668 {
10669 	ch->max_combined = hclge_get_max_channels(handle);
10670 	ch->other_count = 1;
10671 	ch->max_other = 1;
10672 	ch->combined_count = handle->kinfo.rss_size;
10673 }
10674 
10675 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10676 					u16 *alloc_tqps, u16 *max_rss_size)
10677 {
10678 	struct hclge_vport *vport = hclge_get_vport(handle);
10679 	struct hclge_dev *hdev = vport->back;
10680 
10681 	*alloc_tqps = vport->alloc_tqps;
10682 	*max_rss_size = hdev->rss_size_max;
10683 }
10684 
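/* Change the number of enabled queues: update the vport TC mapping,
 * reprogram the RSS TC mode for the new rss_size and, unless the user has
 * configured the RSS indirection table, rebuild the table so that it covers
 * the new queue range.
 */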
10685 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10686 			      bool rxfh_configured)
10687 {
10688 	struct hclge_vport *vport = hclge_get_vport(handle);
10689 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10690 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10691 	struct hclge_dev *hdev = vport->back;
10692 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10693 	u16 cur_rss_size = kinfo->rss_size;
10694 	u16 cur_tqps = kinfo->num_tqps;
10695 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10696 	u16 roundup_size;
10697 	u32 *rss_indir;
10698 	unsigned int i;
10699 	int ret;
10700 
10701 	kinfo->req_rss_size = new_tqps_num;
10702 
10703 	ret = hclge_tm_vport_map_update(hdev);
10704 	if (ret) {
10705 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10706 		return ret;
10707 	}
10708 
10709 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10710 	roundup_size = ilog2(roundup_size);
10711 	/* Set the RSS TC mode according to the new RSS size */
10712 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10713 		tc_valid[i] = 0;
10714 
10715 		if (!(hdev->hw_tc_map & BIT(i)))
10716 			continue;
10717 
10718 		tc_valid[i] = 1;
10719 		tc_size[i] = roundup_size;
10720 		tc_offset[i] = kinfo->rss_size * i;
10721 	}
10722 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10723 	if (ret)
10724 		return ret;
10725 
10726 	/* RSS indirection table has been configured by the user */
10727 	if (rxfh_configured)
10728 		goto out;
10729 
10730 	/* Reinitialize the RSS indirection table according to the new RSS size */
10731 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10732 	if (!rss_indir)
10733 		return -ENOMEM;
10734 
10735 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10736 		rss_indir[i] = i % kinfo->rss_size;
10737 
10738 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10739 	if (ret)
10740 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10741 			ret);
10742 
10743 	kfree(rss_indir);
10744 
10745 out:
10746 	if (!ret)
10747 		dev_info(&hdev->pdev->dev,
10748 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10749 			 cur_rss_size, kinfo->rss_size,
10750 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10751 
10752 	return ret;
10753 }
10754 
10755 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10756 			      u32 *regs_num_64_bit)
10757 {
10758 	struct hclge_desc desc;
10759 	u32 total_num;
10760 	int ret;
10761 
10762 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10763 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10764 	if (ret) {
10765 		dev_err(&hdev->pdev->dev,
10766 			"Query register number cmd failed, ret = %d.\n", ret);
10767 		return ret;
10768 	}
10769 
10770 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10771 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10772 
10773 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10774 	if (!total_num)
10775 		return -EINVAL;
10776 
10777 	return 0;
10778 }
10779 
10780 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10781 				 void *data)
10782 {
10783 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10784 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10785 
10786 	struct hclge_desc *desc;
10787 	u32 *reg_val = data;
10788 	__le32 *desc_data;
10789 	int nodata_num;
10790 	int cmd_num;
10791 	int i, k, n;
10792 	int ret;
10793 
10794 	if (regs_num == 0)
10795 		return 0;
10796 
10797 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10798 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10799 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10800 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10801 	if (!desc)
10802 		return -ENOMEM;
10803 
10804 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10805 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10806 	if (ret) {
10807 		dev_err(&hdev->pdev->dev,
10808 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10809 		kfree(desc);
10810 		return ret;
10811 	}
10812 
10813 	for (i = 0; i < cmd_num; i++) {
10814 		if (i == 0) {
10815 			desc_data = (__le32 *)(&desc[i].data[0]);
10816 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10817 		} else {
10818 			desc_data = (__le32 *)(&desc[i]);
10819 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10820 		}
10821 		for (k = 0; k < n; k++) {
10822 			*reg_val++ = le32_to_cpu(*desc_data++);
10823 
10824 			regs_num--;
10825 			if (!regs_num)
10826 				break;
10827 		}
10828 	}
10829 
10830 	kfree(desc);
10831 	return 0;
10832 }
10833 
10834 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10835 				 void *data)
10836 {
10837 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10838 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10839 
10840 	struct hclge_desc *desc;
10841 	u64 *reg_val = data;
10842 	__le64 *desc_data;
10843 	int nodata_len;
10844 	int cmd_num;
10845 	int i, k, n;
10846 	int ret;
10847 
10848 	if (regs_num == 0)
10849 		return 0;
10850 
10851 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10852 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10853 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10854 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10855 	if (!desc)
10856 		return -ENOMEM;
10857 
10858 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10859 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10860 	if (ret) {
10861 		dev_err(&hdev->pdev->dev,
10862 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10863 		kfree(desc);
10864 		return ret;
10865 	}
10866 
10867 	for (i = 0; i < cmd_num; i++) {
10868 		if (i == 0) {
10869 			desc_data = (__le64 *)(&desc[i].data[0]);
10870 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10871 		} else {
10872 			desc_data = (__le64 *)(&desc[i]);
10873 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10874 		}
10875 		for (k = 0; k < n; k++) {
10876 			*reg_val++ = le64_to_cpu(*desc_data++);
10877 
10878 			regs_num--;
10879 			if (!regs_num)
10880 				break;
10881 		}
10882 	}
10883 
10884 	kfree(desc);
10885 	return 0;
10886 }
10887 
10888 #define MAX_SEPARATE_NUM	4
10889 #define SEPARATOR_VALUE		0xFDFCFBFA
10890 #define REG_NUM_PER_LINE	4
10891 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10892 #define REG_SEPARATOR_LINE	1
10893 #define REG_NUM_REMAIN_MASK	3
10894 
10895 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10896 {
10897 	int i;
10898 
10899 	/* initialize all command BDs except the last one */
10900 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10901 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10902 					   true);
10903 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10904 	}
10905 
10906 	/* initialize the last command BD */
10907 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10908 
10909 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10910 }
10911 
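/* Query how many command BDs each DFX register type needs. The counts are
 * spread across the returned descriptors, so they are looked up through
 * hclge_dfx_bd_offset_list[].
 */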
10912 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10913 				    int *bd_num_list,
10914 				    u32 type_num)
10915 {
10916 	u32 entries_per_desc, desc_index, index, offset, i;
10917 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10918 	int ret;
10919 
10920 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10921 	if (ret) {
10922 		dev_err(&hdev->pdev->dev,
10923 			"Get dfx bd num fail, status is %d.\n", ret);
10924 		return ret;
10925 	}
10926 
10927 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10928 	for (i = 0; i < type_num; i++) {
10929 		offset = hclge_dfx_bd_offset_list[i];
10930 		index = offset % entries_per_desc;
10931 		desc_index = offset / entries_per_desc;
10932 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10933 	}
10934 
10935 	return ret;
10936 }
10937 
10938 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10939 				  struct hclge_desc *desc_src, int bd_num,
10940 				  enum hclge_opcode_type cmd)
10941 {
10942 	struct hclge_desc *desc = desc_src;
10943 	int i, ret;
10944 
10945 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10946 	for (i = 0; i < bd_num - 1; i++) {
10947 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10948 		desc++;
10949 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10950 	}
10951 
10952 	desc = desc_src;
10953 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10954 	if (ret)
10955 		dev_err(&hdev->pdev->dev,
10956 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10957 			cmd, ret);
10958 
10959 	return ret;
10960 }
10961 
10962 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10963 				    void *data)
10964 {
10965 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10966 	struct hclge_desc *desc = desc_src;
10967 	u32 *reg = data;
10968 
10969 	entries_per_desc = ARRAY_SIZE(desc->data);
10970 	reg_num = entries_per_desc * bd_num;
10971 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10972 	for (i = 0; i < reg_num; i++) {
10973 		index = i % entries_per_desc;
10974 		desc_index = i / entries_per_desc;
10975 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10976 	}
10977 	for (i = 0; i < separator_num; i++)
10978 		*reg++ = SEPARATOR_VALUE;
10979 
10980 	return reg_num + separator_num;
10981 }
10982 
10983 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10984 {
10985 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10986 	int data_len_per_desc, bd_num, i;
10987 	int *bd_num_list;
10988 	u32 data_len;
10989 	int ret;
10990 
10991 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
10992 	if (!bd_num_list)
10993 		return -ENOMEM;
10994 
10995 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10996 	if (ret) {
10997 		dev_err(&hdev->pdev->dev,
10998 			"Get dfx reg bd num fail, status is %d.\n", ret);
10999 		goto out;
11000 	}
11001 
11002 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11003 	*len = 0;
11004 	for (i = 0; i < dfx_reg_type_num; i++) {
11005 		bd_num = bd_num_list[i];
11006 		data_len = data_len_per_desc * bd_num;
11007 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11008 	}
11009 
11010 out:
11011 	kfree(bd_num_list);
11012 	return ret;
11013 }
11014 
11015 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11016 {
11017 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11018 	int bd_num, bd_num_max, buf_len, i;
11019 	struct hclge_desc *desc_src;
11020 	int *bd_num_list;
11021 	u32 *reg = data;
11022 	int ret;
11023 
11024 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11025 	if (!bd_num_list)
11026 		return -ENOMEM;
11027 
11028 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11029 	if (ret) {
11030 		dev_err(&hdev->pdev->dev,
11031 			"Get dfx reg bd num fail, status is %d.\n", ret);
11032 		goto out;
11033 	}
11034 
11035 	bd_num_max = bd_num_list[0];
11036 	for (i = 1; i < dfx_reg_type_num; i++)
11037 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11038 
11039 	buf_len = sizeof(*desc_src) * bd_num_max;
11040 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11041 	if (!desc_src) {
11042 		ret = -ENOMEM;
11043 		goto out;
11044 	}
11045 
11046 	for (i = 0; i < dfx_reg_type_num; i++) {
11047 		bd_num = bd_num_list[i];
11048 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11049 					     hclge_dfx_reg_opcode_list[i]);
11050 		if (ret) {
11051 			dev_err(&hdev->pdev->dev,
11052 				"Get dfx reg fail, status is %d.\n", ret);
11053 			break;
11054 		}
11055 
11056 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11057 	}
11058 
11059 	kfree(desc_src);
11060 out:
11061 	kfree(bd_num_list);
11062 	return ret;
11063 }
11064 
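/* Copy the per-PF register blocks (cmdq, common, per-ring and per-vector
 * interrupt registers) into the dump buffer, padding each block with
 * SEPARATOR_VALUE words. Returns the number of u32 words written.
 */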
11065 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11066 			      struct hnae3_knic_private_info *kinfo)
11067 {
11068 #define HCLGE_RING_REG_OFFSET		0x200
11069 #define HCLGE_RING_INT_REG_OFFSET	0x4
11070 
11071 	int i, j, reg_num, separator_num;
11072 	int data_num_sum;
11073 	u32 *reg = data;
11074 
11075 	/* fetch per-PF register values from the PF PCIe register space */
11076 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11077 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11078 	for (i = 0; i < reg_num; i++)
11079 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11080 	for (i = 0; i < separator_num; i++)
11081 		*reg++ = SEPARATOR_VALUE;
11082 	data_num_sum = reg_num + separator_num;
11083 
11084 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11085 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11086 	for (i = 0; i < reg_num; i++)
11087 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11088 	for (i = 0; i < separator_num; i++)
11089 		*reg++ = SEPARATOR_VALUE;
11090 	data_num_sum += reg_num + separator_num;
11091 
11092 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11093 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11094 	for (j = 0; j < kinfo->num_tqps; j++) {
11095 		for (i = 0; i < reg_num; i++)
11096 			*reg++ = hclge_read_dev(&hdev->hw,
11097 						ring_reg_addr_list[i] +
11098 						HCLGE_RING_REG_OFFSET * j);
11099 		for (i = 0; i < separator_num; i++)
11100 			*reg++ = SEPARATOR_VALUE;
11101 	}
11102 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11103 
11104 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11105 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11106 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11107 		for (i = 0; i < reg_num; i++)
11108 			*reg++ = hclge_read_dev(&hdev->hw,
11109 						tqp_intr_reg_addr_list[i] +
11110 						HCLGE_RING_INT_REG_OFFSET * j);
11111 		for (i = 0; i < separator_num; i++)
11112 			*reg++ = SEPARATOR_VALUE;
11113 	}
11114 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11115 
11116 	return data_num_sum;
11117 }
11118 
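/* Work out the size of the register dump: each block contributes its size
 * in REG_LEN_PER_LINE lines plus a separator line, and the DFX register
 * length is added at the end.
 */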
11119 static int hclge_get_regs_len(struct hnae3_handle *handle)
11120 {
11121 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11122 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11123 	struct hclge_vport *vport = hclge_get_vport(handle);
11124 	struct hclge_dev *hdev = vport->back;
11125 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11126 	int regs_lines_32_bit, regs_lines_64_bit;
11127 	int ret;
11128 
11129 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11130 	if (ret) {
11131 		dev_err(&hdev->pdev->dev,
11132 			"Get register number failed, ret = %d.\n", ret);
11133 		return ret;
11134 	}
11135 
11136 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11137 	if (ret) {
11138 		dev_err(&hdev->pdev->dev,
11139 			"Get dfx reg len failed, ret = %d.\n", ret);
11140 		return ret;
11141 	}
11142 
11143 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11144 		REG_SEPARATOR_LINE;
11145 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11146 		REG_SEPARATOR_LINE;
11147 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11148 		REG_SEPARATOR_LINE;
11149 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11150 		REG_SEPARATOR_LINE;
11151 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11152 		REG_SEPARATOR_LINE;
11153 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11154 		REG_SEPARATOR_LINE;
11155 
11156 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11157 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11158 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11159 }
11160 
11161 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11162 			   void *data)
11163 {
11164 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11165 	struct hclge_vport *vport = hclge_get_vport(handle);
11166 	struct hclge_dev *hdev = vport->back;
11167 	u32 regs_num_32_bit, regs_num_64_bit;
11168 	int i, reg_num, separator_num, ret;
11169 	u32 *reg = data;
11170 
11171 	*version = hdev->fw_version;
11172 
11173 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11174 	if (ret) {
11175 		dev_err(&hdev->pdev->dev,
11176 			"Get register number failed, ret = %d.\n", ret);
11177 		return;
11178 	}
11179 
11180 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11181 
11182 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11183 	if (ret) {
11184 		dev_err(&hdev->pdev->dev,
11185 			"Get 32 bit register failed, ret = %d.\n", ret);
11186 		return;
11187 	}
11188 	reg_num = regs_num_32_bit;
11189 	reg += reg_num;
11190 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11191 	for (i = 0; i < separator_num; i++)
11192 		*reg++ = SEPARATOR_VALUE;
11193 
11194 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11195 	if (ret) {
11196 		dev_err(&hdev->pdev->dev,
11197 			"Get 64 bit register failed, ret = %d.\n", ret);
11198 		return;
11199 	}
11200 	reg_num = regs_num_64_bit * 2;
11201 	reg += reg_num;
11202 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11203 	for (i = 0; i < separator_num; i++)
11204 		*reg++ = SEPARATOR_VALUE;
11205 
11206 	ret = hclge_get_dfx_reg(hdev, reg);
11207 	if (ret)
11208 		dev_err(&hdev->pdev->dev,
11209 			"Get dfx register failed, ret = %d.\n", ret);
11210 }
11211 
11212 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11213 {
11214 	struct hclge_set_led_state_cmd *req;
11215 	struct hclge_desc desc;
11216 	int ret;
11217 
11218 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11219 
11220 	req = (struct hclge_set_led_state_cmd *)desc.data;
11221 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11222 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11223 
11224 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11225 	if (ret)
11226 		dev_err(&hdev->pdev->dev,
11227 			"Send set led state cmd error, ret =%d\n", ret);
11228 
11229 	return ret;
11230 }
11231 
11232 enum hclge_led_status {
11233 	HCLGE_LED_OFF,
11234 	HCLGE_LED_ON,
11235 	HCLGE_LED_NO_CHANGE = 0xFF,
11236 };
11237 
11238 static int hclge_set_led_id(struct hnae3_handle *handle,
11239 			    enum ethtool_phys_id_state status)
11240 {
11241 	struct hclge_vport *vport = hclge_get_vport(handle);
11242 	struct hclge_dev *hdev = vport->back;
11243 
11244 	switch (status) {
11245 	case ETHTOOL_ID_ACTIVE:
11246 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11247 	case ETHTOOL_ID_INACTIVE:
11248 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11249 	default:
11250 		return -EINVAL;
11251 	}
11252 }
11253 
11254 static void hclge_get_link_mode(struct hnae3_handle *handle,
11255 				unsigned long *supported,
11256 				unsigned long *advertising)
11257 {
11258 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11259 	struct hclge_vport *vport = hclge_get_vport(handle);
11260 	struct hclge_dev *hdev = vport->back;
11261 	unsigned int idx = 0;
11262 
11263 	for (; idx < size; idx++) {
11264 		supported[idx] = hdev->hw.mac.supported[idx];
11265 		advertising[idx] = hdev->hw.mac.advertising[idx];
11266 	}
11267 }
11268 
11269 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11270 {
11271 	struct hclge_vport *vport = hclge_get_vport(handle);
11272 	struct hclge_dev *hdev = vport->back;
11273 
11274 	return hclge_config_gro(hdev, enable);
11275 }
11276 
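/* Re-apply the PF promiscuous mode when the overflow promisc flags have
 * changed or a previous update failed, and keep the VLAN filter state in
 * sync with the resulting flags.
 */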
11277 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11278 {
11279 	struct hclge_vport *vport = &hdev->vport[0];
11280 	struct hnae3_handle *handle = &vport->nic;
11281 	u8 tmp_flags;
11282 	int ret;
11283 
11284 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11285 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11286 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11287 	}
11288 
11289 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11290 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11291 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11292 					     tmp_flags & HNAE3_MPE);
11293 		if (!ret) {
11294 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11295 			hclge_enable_vlan_filter(handle,
11296 						 tmp_flags & HNAE3_VLAN_FLTR);
11297 		}
11298 	}
11299 }
11300 
11301 static bool hclge_module_existed(struct hclge_dev *hdev)
11302 {
11303 	struct hclge_desc desc;
11304 	u32 existed;
11305 	int ret;
11306 
11307 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11308 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11309 	if (ret) {
11310 		dev_err(&hdev->pdev->dev,
11311 			"failed to get SFP exist state, ret = %d\n", ret);
11312 		return false;
11313 	}
11314 
11315 	existed = le32_to_cpu(desc.data[0]);
11316 
11317 	return existed != 0;
11318 }
11319 
11320 /* need 6 BDs (140 bytes in total) in one read;
11321  * return the number of bytes actually read, 0 means the read failed.
11322  */
11323 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11324 				     u32 len, u8 *data)
11325 {
11326 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11327 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11328 	u16 read_len;
11329 	u16 copy_len;
11330 	int ret;
11331 	int i;
11332 
11333 	/* setup all 6 bds to read module eeprom info. */
11334 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11335 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11336 					   true);
11337 
11338 		/* bd0~bd4 need next flag */
11339 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11340 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11341 	}
11342 
11343 	/* set up bd0; this bd contains the offset and read length. */
11344 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11345 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11346 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11347 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11348 
11349 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11350 	if (ret) {
11351 		dev_err(&hdev->pdev->dev,
11352 			"failed to get SFP eeprom info, ret = %d\n", ret);
11353 		return 0;
11354 	}
11355 
11356 	/* copy sfp info from bd0 to out buffer. */
11357 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11358 	memcpy(data, sfp_info_bd0->data, copy_len);
11359 	read_len = copy_len;
11360 
11361 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11362 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11363 		if (read_len >= len)
11364 			return read_len;
11365 
11366 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11367 		memcpy(data + read_len, desc[i].data, copy_len);
11368 		read_len += copy_len;
11369 	}
11370 
11371 	return read_len;
11372 }
11373 
11374 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11375 				   u32 len, u8 *data)
11376 {
11377 	struct hclge_vport *vport = hclge_get_vport(handle);
11378 	struct hclge_dev *hdev = vport->back;
11379 	u32 read_len = 0;
11380 	u16 data_len;
11381 
11382 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11383 		return -EOPNOTSUPP;
11384 
11385 	if (!hclge_module_existed(hdev))
11386 		return -ENXIO;
11387 
11388 	while (read_len < len) {
11389 		data_len = hclge_get_sfp_eeprom_info(hdev,
11390 						     offset + read_len,
11391 						     len - read_len,
11392 						     data + read_len);
11393 		if (!data_len)
11394 			return -EIO;
11395 
11396 		read_len += data_len;
11397 	}
11398 
11399 	return 0;
11400 }
11401 
11402 static const struct hnae3_ae_ops hclge_ops = {
11403 	.init_ae_dev = hclge_init_ae_dev,
11404 	.uninit_ae_dev = hclge_uninit_ae_dev,
11405 	.flr_prepare = hclge_flr_prepare,
11406 	.flr_done = hclge_flr_done,
11407 	.init_client_instance = hclge_init_client_instance,
11408 	.uninit_client_instance = hclge_uninit_client_instance,
11409 	.map_ring_to_vector = hclge_map_ring_to_vector,
11410 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11411 	.get_vector = hclge_get_vector,
11412 	.put_vector = hclge_put_vector,
11413 	.set_promisc_mode = hclge_set_promisc_mode,
11414 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11415 	.set_loopback = hclge_set_loopback,
11416 	.start = hclge_ae_start,
11417 	.stop = hclge_ae_stop,
11418 	.client_start = hclge_client_start,
11419 	.client_stop = hclge_client_stop,
11420 	.get_status = hclge_get_status,
11421 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11422 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11423 	.get_media_type = hclge_get_media_type,
11424 	.check_port_speed = hclge_check_port_speed,
11425 	.get_fec = hclge_get_fec,
11426 	.set_fec = hclge_set_fec,
11427 	.get_rss_key_size = hclge_get_rss_key_size,
11428 	.get_rss_indir_size = hclge_get_rss_indir_size,
11429 	.get_rss = hclge_get_rss,
11430 	.set_rss = hclge_set_rss,
11431 	.set_rss_tuple = hclge_set_rss_tuple,
11432 	.get_rss_tuple = hclge_get_rss_tuple,
11433 	.get_tc_size = hclge_get_tc_size,
11434 	.get_mac_addr = hclge_get_mac_addr,
11435 	.set_mac_addr = hclge_set_mac_addr,
11436 	.do_ioctl = hclge_do_ioctl,
11437 	.add_uc_addr = hclge_add_uc_addr,
11438 	.rm_uc_addr = hclge_rm_uc_addr,
11439 	.add_mc_addr = hclge_add_mc_addr,
11440 	.rm_mc_addr = hclge_rm_mc_addr,
11441 	.set_autoneg = hclge_set_autoneg,
11442 	.get_autoneg = hclge_get_autoneg,
11443 	.restart_autoneg = hclge_restart_autoneg,
11444 	.halt_autoneg = hclge_halt_autoneg,
11445 	.get_pauseparam = hclge_get_pauseparam,
11446 	.set_pauseparam = hclge_set_pauseparam,
11447 	.set_mtu = hclge_set_mtu,
11448 	.reset_queue = hclge_reset_tqp,
11449 	.get_stats = hclge_get_stats,
11450 	.get_mac_stats = hclge_get_mac_stat,
11451 	.update_stats = hclge_update_stats,
11452 	.get_strings = hclge_get_strings,
11453 	.get_sset_count = hclge_get_sset_count,
11454 	.get_fw_version = hclge_get_fw_version,
11455 	.get_mdix_mode = hclge_get_mdix_mode,
11456 	.enable_vlan_filter = hclge_enable_vlan_filter,
11457 	.set_vlan_filter = hclge_set_vlan_filter,
11458 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11459 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11460 	.reset_event = hclge_reset_event,
11461 	.get_reset_level = hclge_get_reset_level,
11462 	.set_default_reset_request = hclge_set_def_reset_request,
11463 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11464 	.set_channels = hclge_set_channels,
11465 	.get_channels = hclge_get_channels,
11466 	.get_regs_len = hclge_get_regs_len,
11467 	.get_regs = hclge_get_regs,
11468 	.set_led_id = hclge_set_led_id,
11469 	.get_link_mode = hclge_get_link_mode,
11470 	.add_fd_entry = hclge_add_fd_entry,
11471 	.del_fd_entry = hclge_del_fd_entry,
11472 	.del_all_fd_entries = hclge_del_all_fd_entries,
11473 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11474 	.get_fd_rule_info = hclge_get_fd_rule_info,
11475 	.get_fd_all_rules = hclge_get_all_rules,
11476 	.enable_fd = hclge_enable_fd,
11477 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11478 	.dbg_run_cmd = hclge_dbg_run_cmd,
11479 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11480 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11481 	.ae_dev_resetting = hclge_ae_dev_resetting,
11482 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11483 	.set_gro_en = hclge_gro_en,
11484 	.get_global_queue_id = hclge_covert_handle_qid_global,
11485 	.set_timer_task = hclge_set_timer_task,
11486 	.mac_connect_phy = hclge_mac_connect_phy,
11487 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11488 	.get_vf_config = hclge_get_vf_config,
11489 	.set_vf_link_state = hclge_set_vf_link_state,
11490 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11491 	.set_vf_trust = hclge_set_vf_trust,
11492 	.set_vf_rate = hclge_set_vf_rate,
11493 	.set_vf_mac = hclge_set_vf_mac,
11494 	.get_module_eeprom = hclge_get_module_eeprom,
11495 	.get_cmdq_stat = hclge_get_cmdq_stat,
11496 };
11497 
11498 static struct hnae3_ae_algo ae_algo = {
11499 	.ops = &hclge_ops,
11500 	.pdev_id_table = ae_algo_pci_tbl,
11501 };
11502 
11503 static int hclge_init(void)
11504 {
11505 	pr_info("%s is initializing\n", HCLGE_NAME);
11506 
11507 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11508 	if (!hclge_wq) {
11509 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11510 		return -ENOMEM;
11511 	}
11512 
11513 	hnae3_register_ae_algo(&ae_algo);
11514 
11515 	return 0;
11516 }
11517 
11518 static void hclge_exit(void)
11519 {
11520 	hnae3_unregister_ae_algo_prepare(&ae_algo);
11521 	hnae3_unregister_ae_algo(&ae_algo);
11522 	destroy_workqueue(hclge_wq);
11523 }
11524 module_init(hclge_init);
11525 module_exit(hclge_exit);
11526 
11527 MODULE_LICENSE("GPL");
11528 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11529 MODULE_DESCRIPTION("HCLGE Driver");
11530 MODULE_VERSION(HCLGE_MOD_VERSION);
11531