1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
75 				      int wait_cnt);
76 
77 static struct hnae3_ae_algo ae_algo;
78 
79 static struct workqueue_struct *hclge_wq;
80 
81 static const struct pci_device_id ae_algo_pci_tbl[] = {
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
88 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
89 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
90 	/* required last entry */
91 	{0, }
92 };
93 
94 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
95 
96 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
97 					 HCLGE_CMDQ_TX_ADDR_H_REG,
98 					 HCLGE_CMDQ_TX_DEPTH_REG,
99 					 HCLGE_CMDQ_TX_TAIL_REG,
100 					 HCLGE_CMDQ_TX_HEAD_REG,
101 					 HCLGE_CMDQ_RX_ADDR_L_REG,
102 					 HCLGE_CMDQ_RX_ADDR_H_REG,
103 					 HCLGE_CMDQ_RX_DEPTH_REG,
104 					 HCLGE_CMDQ_RX_TAIL_REG,
105 					 HCLGE_CMDQ_RX_HEAD_REG,
106 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
107 					 HCLGE_CMDQ_INTR_STS_REG,
108 					 HCLGE_CMDQ_INTR_EN_REG,
109 					 HCLGE_CMDQ_INTR_GEN_REG};
110 
111 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
112 					   HCLGE_VECTOR0_OTER_EN_REG,
113 					   HCLGE_MISC_RESET_STS_REG,
114 					   HCLGE_MISC_VECTOR_INT_STS,
115 					   HCLGE_GLOBAL_RESET_REG,
116 					   HCLGE_FUN_RST_ING,
117 					   HCLGE_GRO_EN_REG};
118 
119 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
120 					 HCLGE_RING_RX_ADDR_H_REG,
121 					 HCLGE_RING_RX_BD_NUM_REG,
122 					 HCLGE_RING_RX_BD_LENGTH_REG,
123 					 HCLGE_RING_RX_MERGE_EN_REG,
124 					 HCLGE_RING_RX_TAIL_REG,
125 					 HCLGE_RING_RX_HEAD_REG,
126 					 HCLGE_RING_RX_FBD_NUM_REG,
127 					 HCLGE_RING_RX_OFFSET_REG,
128 					 HCLGE_RING_RX_FBD_OFFSET_REG,
129 					 HCLGE_RING_RX_STASH_REG,
130 					 HCLGE_RING_RX_BD_ERR_REG,
131 					 HCLGE_RING_TX_ADDR_L_REG,
132 					 HCLGE_RING_TX_ADDR_H_REG,
133 					 HCLGE_RING_TX_BD_NUM_REG,
134 					 HCLGE_RING_TX_PRIORITY_REG,
135 					 HCLGE_RING_TX_TC_REG,
136 					 HCLGE_RING_TX_MERGE_EN_REG,
137 					 HCLGE_RING_TX_TAIL_REG,
138 					 HCLGE_RING_TX_HEAD_REG,
139 					 HCLGE_RING_TX_FBD_NUM_REG,
140 					 HCLGE_RING_TX_OFFSET_REG,
141 					 HCLGE_RING_TX_EBD_NUM_REG,
142 					 HCLGE_RING_TX_EBD_OFFSET_REG,
143 					 HCLGE_RING_TX_BD_ERR_REG,
144 					 HCLGE_RING_EN_REG};
145 
146 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
147 					     HCLGE_TQP_INTR_GL0_REG,
148 					     HCLGE_TQP_INTR_GL1_REG,
149 					     HCLGE_TQP_INTR_GL2_REG,
150 					     HCLGE_TQP_INTR_RL_REG};
151 
152 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
153 	"App    Loopback test",
154 	"Serdes serial Loopback test",
155 	"Serdes parallel Loopback test",
156 	"Phy    Loopback test"
157 };
158 
159 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
160 	{"mac_tx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
162 	{"mac_rx_mac_pause_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
164 	{"mac_tx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
166 	{"mac_rx_control_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
168 	{"mac_tx_pfc_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
170 	{"mac_tx_pfc_pri0_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
172 	{"mac_tx_pfc_pri1_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
174 	{"mac_tx_pfc_pri2_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
176 	{"mac_tx_pfc_pri3_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
178 	{"mac_tx_pfc_pri4_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
180 	{"mac_tx_pfc_pri5_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
182 	{"mac_tx_pfc_pri6_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
184 	{"mac_tx_pfc_pri7_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
186 	{"mac_rx_pfc_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
188 	{"mac_rx_pfc_pri0_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
190 	{"mac_rx_pfc_pri1_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
192 	{"mac_rx_pfc_pri2_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
194 	{"mac_rx_pfc_pri3_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
196 	{"mac_rx_pfc_pri4_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
198 	{"mac_rx_pfc_pri5_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
200 	{"mac_rx_pfc_pri6_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
202 	{"mac_rx_pfc_pri7_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
204 	{"mac_tx_total_pkt_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
206 	{"mac_tx_total_oct_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
208 	{"mac_tx_good_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
210 	{"mac_tx_bad_pkt_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
212 	{"mac_tx_good_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
214 	{"mac_tx_bad_oct_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
216 	{"mac_tx_uni_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
218 	{"mac_tx_multi_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
220 	{"mac_tx_broad_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
222 	{"mac_tx_undersize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
224 	{"mac_tx_oversize_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
226 	{"mac_tx_64_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
228 	{"mac_tx_65_127_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
230 	{"mac_tx_128_255_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
232 	{"mac_tx_256_511_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
234 	{"mac_tx_512_1023_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
236 	{"mac_tx_1024_1518_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
238 	{"mac_tx_1519_2047_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
240 	{"mac_tx_2048_4095_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
242 	{"mac_tx_4096_8191_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
244 	{"mac_tx_8192_9216_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
246 	{"mac_tx_9217_12287_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
248 	{"mac_tx_12288_16383_oct_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
250 	{"mac_tx_1519_max_good_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
252 	{"mac_tx_1519_max_bad_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
254 	{"mac_rx_total_pkt_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
256 	{"mac_rx_total_oct_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
258 	{"mac_rx_good_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
260 	{"mac_rx_bad_pkt_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
262 	{"mac_rx_good_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
264 	{"mac_rx_bad_oct_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
266 	{"mac_rx_uni_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
268 	{"mac_rx_multi_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
270 	{"mac_rx_broad_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
272 	{"mac_rx_undersize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
274 	{"mac_rx_oversize_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
276 	{"mac_rx_64_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
278 	{"mac_rx_65_127_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
280 	{"mac_rx_128_255_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
282 	{"mac_rx_256_511_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
284 	{"mac_rx_512_1023_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
286 	{"mac_rx_1024_1518_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
288 	{"mac_rx_1519_2047_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
290 	{"mac_rx_2048_4095_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
292 	{"mac_rx_4096_8191_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
294 	{"mac_rx_8192_9216_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
296 	{"mac_rx_9217_12287_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
298 	{"mac_rx_12288_16383_oct_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
300 	{"mac_rx_1519_max_good_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
302 	{"mac_rx_1519_max_bad_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
304 
305 	{"mac_tx_fragment_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
307 	{"mac_tx_undermin_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
309 	{"mac_tx_jabber_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
311 	{"mac_tx_err_all_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
313 	{"mac_tx_from_app_good_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
315 	{"mac_tx_from_app_bad_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
317 	{"mac_rx_fragment_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
319 	{"mac_rx_undermin_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
321 	{"mac_rx_jabber_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
323 	{"mac_rx_fcs_err_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
325 	{"mac_rx_send_app_good_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
327 	{"mac_rx_send_app_bad_pkt_num",
328 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
329 };
330 
331 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
332 	{
333 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
334 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
335 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
336 		.i_port_bitmap = 0x1,
337 	},
338 };
339 
340 static const u8 hclge_hash_key[] = {
341 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
342 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
343 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
344 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
345 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
346 };
347 
348 static const u32 hclge_dfx_bd_offset_list[] = {
349 	HCLGE_DFX_BIOS_BD_OFFSET,
350 	HCLGE_DFX_SSU_0_BD_OFFSET,
351 	HCLGE_DFX_SSU_1_BD_OFFSET,
352 	HCLGE_DFX_IGU_BD_OFFSET,
353 	HCLGE_DFX_RPU_0_BD_OFFSET,
354 	HCLGE_DFX_RPU_1_BD_OFFSET,
355 	HCLGE_DFX_NCSI_BD_OFFSET,
356 	HCLGE_DFX_RTC_BD_OFFSET,
357 	HCLGE_DFX_PPP_BD_OFFSET,
358 	HCLGE_DFX_RCB_BD_OFFSET,
359 	HCLGE_DFX_TQP_BD_OFFSET,
360 	HCLGE_DFX_SSU_2_BD_OFFSET
361 };
362 
363 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
364 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
365 	HCLGE_OPC_DFX_SSU_REG_0,
366 	HCLGE_OPC_DFX_SSU_REG_1,
367 	HCLGE_OPC_DFX_IGU_EGU_REG,
368 	HCLGE_OPC_DFX_RPU_REG_0,
369 	HCLGE_OPC_DFX_RPU_REG_1,
370 	HCLGE_OPC_DFX_NCSI_REG,
371 	HCLGE_OPC_DFX_RTC_REG,
372 	HCLGE_OPC_DFX_PPP_REG,
373 	HCLGE_OPC_DFX_RCB_REG,
374 	HCLGE_OPC_DFX_TQP_REG,
375 	HCLGE_OPC_DFX_SSU_REG_2
376 };
377 
378 static const struct key_info meta_data_key_info[] = {
379 	{ PACKET_TYPE_ID, 6},
380 	{ IP_FRAGEMENT, 1},
381 	{ ROCE_TYPE, 1},
382 	{ NEXT_KEY, 5},
383 	{ VLAN_NUMBER, 2},
384 	{ SRC_VPORT, 12},
385 	{ DST_VPORT, 12},
386 	{ TUNNEL_PACKET, 1},
387 };
388 
389 static const struct key_info tuple_key_info[] = {
390 	{ OUTER_DST_MAC, 48},
391 	{ OUTER_SRC_MAC, 48},
392 	{ OUTER_VLAN_TAG_FST, 16},
393 	{ OUTER_VLAN_TAG_SEC, 16},
394 	{ OUTER_ETH_TYPE, 16},
395 	{ OUTER_L2_RSV, 16},
396 	{ OUTER_IP_TOS, 8},
397 	{ OUTER_IP_PROTO, 8},
398 	{ OUTER_SRC_IP, 32},
399 	{ OUTER_DST_IP, 32},
400 	{ OUTER_L3_RSV, 16},
401 	{ OUTER_SRC_PORT, 16},
402 	{ OUTER_DST_PORT, 16},
403 	{ OUTER_L4_RSV, 32},
404 	{ OUTER_TUN_VNI, 24},
405 	{ OUTER_TUN_FLOW_ID, 8},
406 	{ INNER_DST_MAC, 48},
407 	{ INNER_SRC_MAC, 48},
408 	{ INNER_VLAN_TAG_FST, 16},
409 	{ INNER_VLAN_TAG_SEC, 16},
410 	{ INNER_ETH_TYPE, 16},
411 	{ INNER_L2_RSV, 16},
412 	{ INNER_IP_TOS, 8},
413 	{ INNER_IP_PROTO, 8},
414 	{ INNER_SRC_IP, 32},
415 	{ INNER_DST_IP, 32},
416 	{ INNER_L3_RSV, 16},
417 	{ INNER_SRC_PORT, 16},
418 	{ INNER_DST_PORT, 16},
419 	{ INNER_L4_RSV, 32},
420 };
421 
422 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
423 {
424 #define HCLGE_MAC_CMD_NUM 21
425 
426 	u64 *data = (u64 *)(&hdev->mac_stats);
427 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
428 	__le64 *desc_data;
429 	int i, k, n;
430 	int ret;
431 
432 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
433 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
434 	if (ret) {
435 		dev_err(&hdev->pdev->dev,
436 			"Get MAC pkt stats fail, status = %d.\n", ret);
437 
438 		return ret;
439 	}
440 
441 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
442 		/* for special opcode 0032, only the first desc has the header */
443 		if (unlikely(i == 0)) {
444 			desc_data = (__le64 *)(&desc[i].data[0]);
445 			n = HCLGE_RD_FIRST_STATS_NUM;
446 		} else {
447 			desc_data = (__le64 *)(&desc[i]);
448 			n = HCLGE_RD_OTHER_STATS_NUM;
449 		}
450 
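		/* accumulate each 64-bit counter into hdev->mac_stats; the
		 * stats struct is assumed to be laid out in the same order as
		 * the values returned by firmware
		 */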
451 		for (k = 0; k < n; k++) {
452 			*data += le64_to_cpu(*desc_data);
453 			data++;
454 			desc_data++;
455 		}
456 	}
457 
458 	return 0;
459 }
460 
461 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
462 {
463 	u64 *data = (u64 *)(&hdev->mac_stats);
464 	struct hclge_desc *desc;
465 	__le64 *desc_data;
466 	u16 i, k, n;
467 	int ret;
468 
469 	/* This may be called inside atomic sections,
470 	 * so GFP_ATOMIC is more suitable here
471 	 */
472 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
473 	if (!desc)
474 		return -ENOMEM;
475 
476 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
477 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
478 	if (ret) {
479 		kfree(desc);
480 		return ret;
481 	}
482 
483 	for (i = 0; i < desc_num; i++) {
484 		/* for special opcode 0034, only the first desc has the header */
485 		if (i == 0) {
486 			desc_data = (__le64 *)(&desc[i].data[0]);
487 			n = HCLGE_RD_FIRST_STATS_NUM;
488 		} else {
489 			desc_data = (__le64 *)(&desc[i]);
490 			n = HCLGE_RD_OTHER_STATS_NUM;
491 		}
492 
493 		for (k = 0; k < n; k++) {
494 			*data += le64_to_cpu(*desc_data);
495 			data++;
496 			desc_data++;
497 		}
498 	}
499 
500 	kfree(desc);
501 
502 	return 0;
503 }
504 
505 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
506 {
507 	struct hclge_desc desc;
508 	__le32 *desc_data;
509 	u32 reg_num;
510 	int ret;
511 
512 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
513 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
514 	if (ret)
515 		return ret;
516 
517 	desc_data = (__le32 *)(&desc.data[0]);
518 	reg_num = le32_to_cpu(*desc_data);
519 
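	/* descriptors needed: one fixed descriptor plus one more for every
	 * four of the remaining (reg_num - 3) registers, rounded up
	 */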
520 	*desc_num = 1 + ((reg_num - 3) >> 2) +
521 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
522 
523 	return 0;
524 }
525 
526 static int hclge_mac_update_stats(struct hclge_dev *hdev)
527 {
528 	u32 desc_num;
529 	int ret;
530 
531 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
532 
533 	/* The firmware supports the new statistics acquisition method */
534 	if (!ret)
535 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
536 	else if (ret == -EOPNOTSUPP)
537 		ret = hclge_mac_update_stats_defective(hdev);
538 	else
539 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
540 
541 	return ret;
542 }
543 
544 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
545 {
546 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
547 	struct hclge_vport *vport = hclge_get_vport(handle);
548 	struct hclge_dev *hdev = vport->back;
549 	struct hnae3_queue *queue;
550 	struct hclge_desc desc[1];
551 	struct hclge_tqp *tqp;
552 	int ret, i;
553 
554 	for (i = 0; i < kinfo->num_tqps; i++) {
555 		queue = handle->kinfo.tqp[i];
556 		tqp = container_of(queue, struct hclge_tqp, q);
557 		/* command : HCLGE_OPC_QUERY_RX_STATS */
558 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
559 					   true);
560 
561 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
562 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
563 		if (ret) {
564 			dev_err(&hdev->pdev->dev,
565 				"Query tqp stat fail, status = %d,queue = %d\n",
566 				ret, i);
567 			return ret;
568 		}
569 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
570 			le32_to_cpu(desc[0].data[1]);
571 	}
572 
573 	for (i = 0; i < kinfo->num_tqps; i++) {
574 		queue = handle->kinfo.tqp[i];
575 		tqp = container_of(queue, struct hclge_tqp, q);
576 		/* command : HCLGE_OPC_QUERY_TX_STATS */
577 		hclge_cmd_setup_basic_desc(&desc[0],
578 					   HCLGE_OPC_QUERY_TX_STATS,
579 					   true);
580 
581 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
582 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
583 		if (ret) {
584 			dev_err(&hdev->pdev->dev,
585 				"Query tqp stat fail, status = %d,queue = %d\n",
586 				ret, i);
587 			return ret;
588 		}
589 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
590 			le32_to_cpu(desc[0].data[1]);
591 	}
592 
593 	return 0;
594 }
595 
596 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
597 {
598 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
599 	struct hclge_tqp *tqp;
600 	u64 *buff = data;
601 	int i;
602 
603 	for (i = 0; i < kinfo->num_tqps; i++) {
604 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
606 	}
607 
608 	for (i = 0; i < kinfo->num_tqps; i++) {
609 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
610 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
611 	}
612 
613 	return buff;
614 }
615 
616 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
617 {
618 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
619 
620 	/* each TQP has one TX queue and one RX queue */
621 	return kinfo->num_tqps * (2);
622 }
623 
624 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
625 {
626 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
627 	u8 *buff = data;
628 	int i;
629 
630 	for (i = 0; i < kinfo->num_tqps; i++) {
631 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
632 			struct hclge_tqp, q);
633 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
634 			 tqp->index);
635 		buff = buff + ETH_GSTRING_LEN;
636 	}
637 
638 	for (i = 0; i < kinfo->num_tqps; i++) {
639 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
640 			struct hclge_tqp, q);
641 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
642 			 tqp->index);
643 		buff = buff + ETH_GSTRING_LEN;
644 	}
645 
646 	return buff;
647 }
648 
649 static u64 *hclge_comm_get_stats(const void *comm_stats,
650 				 const struct hclge_comm_stats_str strs[],
651 				 int size, u64 *data)
652 {
653 	u64 *buf = data;
654 	u32 i;
655 
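	/* read each counter from comm_stats at its precomputed field offset */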
656 	for (i = 0; i < size; i++)
657 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
658 
659 	return buf + size;
660 }
661 
662 static u8 *hclge_comm_get_strings(u32 stringset,
663 				  const struct hclge_comm_stats_str strs[],
664 				  int size, u8 *data)
665 {
666 	char *buff = (char *)data;
667 	u32 i;
668 
669 	if (stringset != ETH_SS_STATS)
670 		return buff;
671 
672 	for (i = 0; i < size; i++) {
673 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
674 		buff = buff + ETH_GSTRING_LEN;
675 	}
676 
677 	return (u8 *)buff;
678 }
679 
680 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
681 {
682 	struct hnae3_handle *handle;
683 	int status;
684 
685 	handle = &hdev->vport[0].nic;
686 	if (handle->client) {
687 		status = hclge_tqps_update_stats(handle);
688 		if (status) {
689 			dev_err(&hdev->pdev->dev,
690 				"Update TQPS stats fail, status = %d.\n",
691 				status);
692 		}
693 	}
694 
695 	status = hclge_mac_update_stats(hdev);
696 	if (status)
697 		dev_err(&hdev->pdev->dev,
698 			"Update MAC stats fail, status = %d.\n", status);
699 }
700 
701 static void hclge_update_stats(struct hnae3_handle *handle,
702 			       struct net_device_stats *net_stats)
703 {
704 	struct hclge_vport *vport = hclge_get_vport(handle);
705 	struct hclge_dev *hdev = vport->back;
706 	int status;
707 
708 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
709 		return;
710 
711 	status = hclge_mac_update_stats(hdev);
712 	if (status)
713 		dev_err(&hdev->pdev->dev,
714 			"Update MAC stats fail, status = %d.\n",
715 			status);
716 
717 	status = hclge_tqps_update_stats(handle);
718 	if (status)
719 		dev_err(&hdev->pdev->dev,
720 			"Update TQPS stats fail, status = %d.\n",
721 			status);
722 
723 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
724 }
725 
726 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
727 {
728 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
729 		HNAE3_SUPPORT_PHY_LOOPBACK |\
730 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
731 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
732 
733 	struct hclge_vport *vport = hclge_get_vport(handle);
734 	struct hclge_dev *hdev = vport->back;
735 	int count = 0;
736 
737 	/* Loopback test support rules:
738 	 * mac: only supported in GE mode
739 	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
740 	 * phy: only supported when a PHY device exists on the board
741 	 */
742 	if (stringset == ETH_SS_TEST) {
743 		/* clear loopback bit flags at first */
744 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
745 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
746 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
747 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
748 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
749 			count += 1;
750 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
751 		}
752 
753 		count += 2;
754 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
755 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
756 
757 		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
758 		    hdev->hw.mac.phydev->drv->set_loopback) {
759 			count += 1;
760 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
761 		}
762 
763 	} else if (stringset == ETH_SS_STATS) {
764 		count = ARRAY_SIZE(g_mac_stats_string) +
765 			hclge_tqps_get_sset_count(handle, stringset);
766 	}
767 
768 	return count;
769 }
770 
771 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
772 			      u8 *data)
773 {
774 	u8 *p = (char *)data;
775 	int size;
776 
777 	if (stringset == ETH_SS_STATS) {
778 		size = ARRAY_SIZE(g_mac_stats_string);
779 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
780 					   size, p);
781 		p = hclge_tqps_get_strings(handle, p);
782 	} else if (stringset == ETH_SS_TEST) {
783 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
784 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
785 			       ETH_GSTRING_LEN);
786 			p += ETH_GSTRING_LEN;
787 		}
788 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
789 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
794 			memcpy(p,
795 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
796 			       ETH_GSTRING_LEN);
797 			p += ETH_GSTRING_LEN;
798 		}
799 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
800 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
801 			       ETH_GSTRING_LEN);
802 			p += ETH_GSTRING_LEN;
803 		}
804 	}
805 }
806 
807 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
808 {
809 	struct hclge_vport *vport = hclge_get_vport(handle);
810 	struct hclge_dev *hdev = vport->back;
811 	u64 *p;
812 
813 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
814 				 ARRAY_SIZE(g_mac_stats_string), data);
815 	p = hclge_tqps_get_stats(handle, p);
816 }
817 
818 static void hclge_get_mac_stat(struct hnae3_handle *handle,
819 			       struct hns3_mac_stats *mac_stats)
820 {
821 	struct hclge_vport *vport = hclge_get_vport(handle);
822 	struct hclge_dev *hdev = vport->back;
823 
824 	hclge_update_stats(handle, NULL);
825 
826 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
827 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
828 }
829 
830 static int hclge_parse_func_status(struct hclge_dev *hdev,
831 				   struct hclge_func_status_cmd *status)
832 {
833 #define HCLGE_MAC_ID_MASK	0xF
834 
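	/* the reported function status is only valid once the PF state is DONE */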
835 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
836 		return -EINVAL;
837 
838 	/* record whether this PF is the main PF */
839 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
840 		hdev->flag |= HCLGE_FLAG_MAIN;
841 	else
842 		hdev->flag &= ~HCLGE_FLAG_MAIN;
843 
844 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
845 	return 0;
846 }
847 
848 static int hclge_query_function_status(struct hclge_dev *hdev)
849 {
850 #define HCLGE_QUERY_MAX_CNT	5
851 
852 	struct hclge_func_status_cmd *req;
853 	struct hclge_desc desc;
854 	int timeout = 0;
855 	int ret;
856 
857 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
858 	req = (struct hclge_func_status_cmd *)desc.data;
859 
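	/* poll up to HCLGE_QUERY_MAX_CNT times, sleeping 1-2 ms between
	 * attempts, until the firmware reports a non-zero PF state
	 */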
860 	do {
861 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
862 		if (ret) {
863 			dev_err(&hdev->pdev->dev,
864 				"query function status failed %d.\n", ret);
865 			return ret;
866 		}
867 
868 		/* check whether the PF reset is done */
869 		if (req->pf_state)
870 			break;
871 		usleep_range(1000, 2000);
872 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
873 
874 	return hclge_parse_func_status(hdev, req);
875 }
876 
877 static int hclge_query_pf_resource(struct hclge_dev *hdev)
878 {
879 	struct hclge_pf_res_cmd *req;
880 	struct hclge_desc desc;
881 	int ret;
882 
883 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
884 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 	if (ret) {
886 		dev_err(&hdev->pdev->dev,
887 			"query pf resource failed %d.\n", ret);
888 		return ret;
889 	}
890 
891 	req = (struct hclge_pf_res_cmd *)desc.data;
892 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
893 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
894 
895 	if (req->tx_buf_size)
896 		hdev->tx_buf_size =
897 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
898 	else
899 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
900 
901 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
902 
903 	if (req->dv_buf_size)
904 		hdev->dv_buf_size =
905 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
906 	else
907 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
908 
909 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
910 
911 	if (hnae3_dev_roce_supported(hdev)) {
912 		hdev->roce_base_msix_offset =
913 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
914 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
915 		hdev->num_roce_msi =
916 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
917 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
918 
919 		/* the NIC's MSI-X vector count always equals the RoCE's */
920 		hdev->num_nic_msi = hdev->num_roce_msi;
921 
922 		/* The PF has both NIC and RoCE vectors;
923 		 * NIC vectors are allocated before RoCE vectors.
924 		 */
925 		hdev->num_msi = hdev->num_roce_msi +
926 				hdev->roce_base_msix_offset;
927 	} else {
928 		hdev->num_msi =
929 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
930 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
931 
932 		hdev->num_nic_msi = hdev->num_msi;
933 	}
934 
935 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
936 		dev_err(&hdev->pdev->dev,
937 			"Just %u msi resources, not enough for pf(min:2).\n",
938 			hdev->num_nic_msi);
939 		return -EINVAL;
940 	}
941 
942 	return 0;
943 }
944 
945 static int hclge_parse_speed(int speed_cmd, int *speed)
946 {
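	/* map the firmware speed code to the matching HCLGE_MAC_SPEED_* value */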
947 	switch (speed_cmd) {
948 	case 6:
949 		*speed = HCLGE_MAC_SPEED_10M;
950 		break;
951 	case 7:
952 		*speed = HCLGE_MAC_SPEED_100M;
953 		break;
954 	case 0:
955 		*speed = HCLGE_MAC_SPEED_1G;
956 		break;
957 	case 1:
958 		*speed = HCLGE_MAC_SPEED_10G;
959 		break;
960 	case 2:
961 		*speed = HCLGE_MAC_SPEED_25G;
962 		break;
963 	case 3:
964 		*speed = HCLGE_MAC_SPEED_40G;
965 		break;
966 	case 4:
967 		*speed = HCLGE_MAC_SPEED_50G;
968 		break;
969 	case 5:
970 		*speed = HCLGE_MAC_SPEED_100G;
971 		break;
972 	case 8:
973 		*speed = HCLGE_MAC_SPEED_200G;
974 		break;
975 	default:
976 		return -EINVAL;
977 	}
978 
979 	return 0;
980 }
981 
982 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
983 {
984 	struct hclge_vport *vport = hclge_get_vport(handle);
985 	struct hclge_dev *hdev = vport->back;
986 	u32 speed_ability = hdev->hw.mac.speed_ability;
987 	u32 speed_bit = 0;
988 
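	/* translate the requested speed into its capability bit and check it
	 * against the MAC's advertised speed_ability
	 */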
989 	switch (speed) {
990 	case HCLGE_MAC_SPEED_10M:
991 		speed_bit = HCLGE_SUPPORT_10M_BIT;
992 		break;
993 	case HCLGE_MAC_SPEED_100M:
994 		speed_bit = HCLGE_SUPPORT_100M_BIT;
995 		break;
996 	case HCLGE_MAC_SPEED_1G:
997 		speed_bit = HCLGE_SUPPORT_1G_BIT;
998 		break;
999 	case HCLGE_MAC_SPEED_10G:
1000 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1001 		break;
1002 	case HCLGE_MAC_SPEED_25G:
1003 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1004 		break;
1005 	case HCLGE_MAC_SPEED_40G:
1006 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1007 		break;
1008 	case HCLGE_MAC_SPEED_50G:
1009 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1010 		break;
1011 	case HCLGE_MAC_SPEED_100G:
1012 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1013 		break;
1014 	case HCLGE_MAC_SPEED_200G:
1015 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1016 		break;
1017 	default:
1018 		return -EINVAL;
1019 	}
1020 
1021 	if (speed_bit & speed_ability)
1022 		return 0;
1023 
1024 	return -EINVAL;
1025 }
1026 
1027 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1028 {
1029 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1030 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1031 				 mac->supported);
1032 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1033 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1034 				 mac->supported);
1035 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1036 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1037 				 mac->supported);
1038 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1046 				 mac->supported);
1047 }
1048 
1049 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1050 {
1051 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1065 				 mac->supported);
1066 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1067 		linkmode_set_bit(
1068 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1069 			mac->supported);
1070 }
1071 
1072 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1073 {
1074 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1076 				 mac->supported);
1077 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1079 				 mac->supported);
1080 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1082 				 mac->supported);
1083 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1091 				 mac->supported);
1092 }
1093 
1094 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1095 {
1096 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1104 				 mac->supported);
1105 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1106 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1107 				 mac->supported);
1108 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1109 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1110 				 mac->supported);
1111 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1113 				 mac->supported);
1114 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1115 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1116 				 mac->supported);
1117 }
1118 
1119 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1120 {
1121 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1122 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1123 
1124 	switch (mac->speed) {
1125 	case HCLGE_MAC_SPEED_10G:
1126 	case HCLGE_MAC_SPEED_40G:
1127 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1128 				 mac->supported);
1129 		mac->fec_ability =
1130 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1131 		break;
1132 	case HCLGE_MAC_SPEED_25G:
1133 	case HCLGE_MAC_SPEED_50G:
1134 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1135 				 mac->supported);
1136 		mac->fec_ability =
1137 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1138 			BIT(HNAE3_FEC_AUTO);
1139 		break;
1140 	case HCLGE_MAC_SPEED_100G:
1141 	case HCLGE_MAC_SPEED_200G:
1142 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1143 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	default:
1146 		mac->fec_ability = 0;
1147 		break;
1148 	}
1149 }
1150 
1151 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1152 					u16 speed_ability)
1153 {
1154 	struct hclge_mac *mac = &hdev->hw.mac;
1155 
1156 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1157 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1158 				 mac->supported);
1159 
1160 	hclge_convert_setting_sr(mac, speed_ability);
1161 	hclge_convert_setting_lr(mac, speed_ability);
1162 	hclge_convert_setting_cr(mac, speed_ability);
1163 	if (hnae3_dev_fec_supported(hdev))
1164 		hclge_convert_setting_fec(mac);
1165 
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1167 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1169 }
1170 
1171 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1172 					    u16 speed_ability)
1173 {
1174 	struct hclge_mac *mac = &hdev->hw.mac;
1175 
1176 	hclge_convert_setting_kr(mac, speed_ability);
1177 	if (hnae3_dev_fec_supported(hdev))
1178 		hclge_convert_setting_fec(mac);
1179 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1180 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1182 }
1183 
1184 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1185 					 u16 speed_ability)
1186 {
1187 	unsigned long *supported = hdev->hw.mac.supported;
1188 
1189 	/* default to supporting all speeds for a GE port */
1190 	if (!speed_ability)
1191 		speed_ability = HCLGE_SUPPORT_GE;
1192 
1193 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1194 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1195 				 supported);
1196 
1197 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1198 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1199 				 supported);
1200 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1201 				 supported);
1202 	}
1203 
1204 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1205 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1206 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1207 	}
1208 
1209 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1210 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1211 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1212 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1213 }
1214 
1215 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1216 {
1217 	u8 media_type = hdev->hw.mac.media_type;
1218 
1219 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1220 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1221 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1222 		hclge_parse_copper_link_mode(hdev, speed_ability);
1223 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1224 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1225 }
1226 
1227 static u32 hclge_get_max_speed(u16 speed_ability)
1228 {
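	/* return the highest advertised speed, falling back to 1G when no
	 * ability bit is set
	 */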
1229 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1230 		return HCLGE_MAC_SPEED_200G;
1231 
1232 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1233 		return HCLGE_MAC_SPEED_100G;
1234 
1235 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1236 		return HCLGE_MAC_SPEED_50G;
1237 
1238 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1239 		return HCLGE_MAC_SPEED_40G;
1240 
1241 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1242 		return HCLGE_MAC_SPEED_25G;
1243 
1244 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1245 		return HCLGE_MAC_SPEED_10G;
1246 
1247 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1248 		return HCLGE_MAC_SPEED_1G;
1249 
1250 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1251 		return HCLGE_MAC_SPEED_100M;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1254 		return HCLGE_MAC_SPEED_10M;
1255 
1256 	return HCLGE_MAC_SPEED_1G;
1257 }
1258 
1259 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1260 {
1261 #define SPEED_ABILITY_EXT_SHIFT			8
1262 
1263 	struct hclge_cfg_param_cmd *req;
1264 	u64 mac_addr_tmp_high;
1265 	u16 speed_ability_ext;
1266 	u64 mac_addr_tmp;
1267 	unsigned int i;
1268 
1269 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1270 
1271 	/* get the configuration */
1272 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1273 					      HCLGE_CFG_VMDQ_M,
1274 					      HCLGE_CFG_VMDQ_S);
1275 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1276 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1277 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1278 					    HCLGE_CFG_TQP_DESC_N_M,
1279 					    HCLGE_CFG_TQP_DESC_N_S);
1280 
1281 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 					HCLGE_CFG_PHY_ADDR_M,
1283 					HCLGE_CFG_PHY_ADDR_S);
1284 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 					  HCLGE_CFG_MEDIA_TP_M,
1286 					  HCLGE_CFG_MEDIA_TP_S);
1287 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1288 					  HCLGE_CFG_RX_BUF_LEN_M,
1289 					  HCLGE_CFG_RX_BUF_LEN_S);
1290 	/* get mac_address */
1291 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1292 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1293 					    HCLGE_CFG_MAC_ADDR_H_M,
1294 					    HCLGE_CFG_MAC_ADDR_H_S);
1295 
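	/* merge the high 16 bits of the MAC address above the low 32 bits
	 * (a 32-bit shift done in two steps)
	 */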
1296 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1297 
1298 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1299 					     HCLGE_CFG_DEFAULT_SPEED_M,
1300 					     HCLGE_CFG_DEFAULT_SPEED_S);
1301 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1302 					    HCLGE_CFG_RSS_SIZE_M,
1303 					    HCLGE_CFG_RSS_SIZE_S);
1304 
1305 	for (i = 0; i < ETH_ALEN; i++)
1306 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1307 
1308 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1309 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1310 
1311 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1312 					     HCLGE_CFG_SPEED_ABILITY_M,
1313 					     HCLGE_CFG_SPEED_ABILITY_S);
1314 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1315 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1316 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
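	/* the extended ability bits sit above the base speed_ability byte */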
1317 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1318 
1319 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1320 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1321 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1322 	if (!cfg->umv_space)
1323 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1324 }
1325 
1326 /* hclge_get_cfg: query the static parameters from flash
1327  * @hdev: pointer to struct hclge_dev
1328  * @hcfg: the config structure to be filled
1329  */
1330 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1331 {
1332 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1333 	struct hclge_cfg_param_cmd *req;
1334 	unsigned int i;
1335 	int ret;
1336 
1337 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1338 		u32 offset = 0;
1339 
1340 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1341 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1342 					   true);
1343 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1344 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1345 		/* Length must be in units of 4 bytes when sent to hardware */
1346 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1347 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1348 		req->offset = cpu_to_le32(offset);
1349 	}
1350 
1351 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1352 	if (ret) {
1353 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1354 		return ret;
1355 	}
1356 
1357 	hclge_parse_cfg(hcfg, desc);
1358 
1359 	return 0;
1360 }
1361 
1362 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1363 {
1364 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1365 
1366 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1367 
1368 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1369 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1370 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1371 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1372 }
1373 
1374 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1375 				  struct hclge_desc *desc)
1376 {
1377 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1378 	struct hclge_dev_specs_0_cmd *req0;
1379 
1380 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1381 
1382 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1383 	ae_dev->dev_specs.rss_ind_tbl_size =
1384 		le16_to_cpu(req0->rss_ind_tbl_size);
1385 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1386 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1387 }
1388 
1389 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1390 {
1391 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1392 
1393 	if (!dev_specs->max_non_tso_bd_num)
1394 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1395 	if (!dev_specs->rss_ind_tbl_size)
1396 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1397 	if (!dev_specs->rss_key_size)
1398 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1399 	if (!dev_specs->max_tm_rate)
1400 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1401 }
1402 
1403 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1404 {
1405 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1406 	int ret;
1407 	int i;
1408 
1409 	/* set default specifications as devices lower than version V3 do not
1410 	 * support querying specifications from firmware.
1411 	 */
1412 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1413 		hclge_set_default_dev_specs(hdev);
1414 		return 0;
1415 	}
1416 
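	/* chain the query descriptors: every BD except the last one carries
	 * the NEXT flag
	 */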
1417 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1418 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1419 					   true);
1420 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1421 	}
1422 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1423 
1424 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1425 	if (ret)
1426 		return ret;
1427 
1428 	hclge_parse_dev_specs(hdev, desc);
1429 	hclge_check_dev_specs(hdev);
1430 
1431 	return 0;
1432 }
1433 
1434 static int hclge_get_cap(struct hclge_dev *hdev)
1435 {
1436 	int ret;
1437 
1438 	ret = hclge_query_function_status(hdev);
1439 	if (ret) {
1440 		dev_err(&hdev->pdev->dev,
1441 			"query function status error %d.\n", ret);
1442 		return ret;
1443 	}
1444 
1445 	/* get pf resource */
1446 	return hclge_query_pf_resource(hdev);
1447 }
1448 
1449 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1450 {
1451 #define HCLGE_MIN_TX_DESC	64
1452 #define HCLGE_MIN_RX_DESC	64
1453 
1454 	if (!is_kdump_kernel())
1455 		return;
1456 
1457 	dev_info(&hdev->pdev->dev,
1458 		 "Running kdump kernel. Using minimal resources\n");
1459 
1460 	/* the minimum number of queue pairs equals the number of vports */
1461 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1462 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1463 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1464 }
1465 
1466 static int hclge_configure(struct hclge_dev *hdev)
1467 {
1468 	const struct cpumask *cpumask = cpu_online_mask;
1469 	struct hclge_cfg cfg;
1470 	unsigned int i;
1471 	int node, ret;
1472 
1473 	ret = hclge_get_cfg(hdev, &cfg);
1474 	if (ret)
1475 		return ret;
1476 
1477 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1478 	hdev->base_tqp_pid = 0;
1479 	hdev->rss_size_max = cfg.rss_size_max;
1480 	hdev->rx_buf_len = cfg.rx_buf_len;
1481 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1482 	hdev->hw.mac.media_type = cfg.media_type;
1483 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1484 	hdev->num_tx_desc = cfg.tqp_desc_num;
1485 	hdev->num_rx_desc = cfg.tqp_desc_num;
1486 	hdev->tm_info.num_pg = 1;
1487 	hdev->tc_max = cfg.tc_num;
1488 	hdev->tm_info.hw_pfc_map = 0;
1489 	hdev->wanted_umv_size = cfg.umv_space;
1490 
1491 	if (hnae3_dev_fd_supported(hdev)) {
1492 		hdev->fd_en = true;
1493 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1494 	}
1495 
1496 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1497 	if (ret) {
1498 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1499 			cfg.default_speed, ret);
1500 		return ret;
1501 	}
1502 
1503 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1504 
1505 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1506 
1507 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1508 	    (hdev->tc_max < 1)) {
1509 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1510 			 hdev->tc_max);
1511 		hdev->tc_max = 1;
1512 	}
1513 
1514 	/* Dev does not support DCB */
1515 	if (!hnae3_dev_dcb_supported(hdev)) {
1516 		hdev->tc_max = 1;
1517 		hdev->pfc_max = 0;
1518 	} else {
1519 		hdev->pfc_max = hdev->tc_max;
1520 	}
1521 
1522 	hdev->tm_info.num_tc = 1;
1523 
1524 	/* Non-contiguous TCs are currently not supported */
1525 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1526 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1527 
1528 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1529 
1530 	hclge_init_kdump_kernel_config(hdev);
1531 
1532 	/* Set the affinity based on numa node */
1533 	node = dev_to_node(&hdev->pdev->dev);
1534 	if (node != NUMA_NO_NODE)
1535 		cpumask = cpumask_of_node(node);
1536 
1537 	cpumask_copy(&hdev->affinity_mask, cpumask);
1538 
1539 	return ret;
1540 }
1541 
1542 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1543 			    u16 tso_mss_max)
1544 {
1545 	struct hclge_cfg_tso_status_cmd *req;
1546 	struct hclge_desc desc;
1547 
1548 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1549 
1550 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1551 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1552 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1553 
1554 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1555 }
1556 
1557 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1558 {
1559 	struct hclge_cfg_gro_status_cmd *req;
1560 	struct hclge_desc desc;
1561 	int ret;
1562 
1563 	if (!hnae3_dev_gro_supported(hdev))
1564 		return 0;
1565 
1566 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1567 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1568 
1569 	req->gro_en = en ? 1 : 0;
1570 
1571 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1572 	if (ret)
1573 		dev_err(&hdev->pdev->dev,
1574 			"GRO hardware config cmd failed, ret = %d\n", ret);
1575 
1576 	return ret;
1577 }
1578 
1579 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1580 {
1581 	struct hclge_tqp *tqp;
1582 	int i;
1583 
1584 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1585 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1586 	if (!hdev->htqp)
1587 		return -ENOMEM;
1588 
1589 	tqp = hdev->htqp;
1590 
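	/* initialize each hardware TQP; its register space sits at a fixed
	 * stride from the PF I/O base
	 */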
1591 	for (i = 0; i < hdev->num_tqps; i++) {
1592 		tqp->dev = &hdev->pdev->dev;
1593 		tqp->index = i;
1594 
1595 		tqp->q.ae_algo = &ae_algo;
1596 		tqp->q.buf_size = hdev->rx_buf_len;
1597 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1598 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1599 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1600 			i * HCLGE_TQP_REG_SIZE;
1601 
1602 		tqp++;
1603 	}
1604 
1605 	return 0;
1606 }
1607 
1608 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1609 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1610 {
1611 	struct hclge_tqp_map_cmd *req;
1612 	struct hclge_desc desc;
1613 	int ret;
1614 
1615 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1616 
1617 	req = (struct hclge_tqp_map_cmd *)desc.data;
1618 	req->tqp_id = cpu_to_le16(tqp_pid);
1619 	req->tqp_vf = func_id;
1620 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1621 	if (!is_pf)
1622 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1623 	req->tqp_vid = cpu_to_le16(tqp_vid);
1624 
1625 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1626 	if (ret)
1627 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1628 
1629 	return ret;
1630 }
1631 
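/* Walk the device-wide TQP pool and claim up to @num_tqps free queues for
 * this vport. The vport's rss_size is then capped by rss_size_max and by
 * (num_nic_msi - 1) / num_tc so that, by default, every queue can get its
 * own MSI-X vector.
 */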
1632 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1633 {
1634 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1635 	struct hclge_dev *hdev = vport->back;
1636 	int i, alloced;
1637 
1638 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1639 	     alloced < num_tqps; i++) {
1640 		if (!hdev->htqp[i].alloced) {
1641 			hdev->htqp[i].q.handle = &vport->nic;
1642 			hdev->htqp[i].q.tqp_index = alloced;
1643 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1644 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1645 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1646 			hdev->htqp[i].alloced = true;
1647 			alloced++;
1648 		}
1649 	}
1650 	vport->alloc_tqps = alloced;
1651 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1652 				vport->alloc_tqps / hdev->tm_info.num_tc);
1653 
1654 	/* ensure a one-to-one mapping between irq and queue by default */
1655 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1656 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1657 
1658 	return 0;
1659 }
1660 
1661 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1662 			    u16 num_tx_desc, u16 num_rx_desc)
1663 
1664 {
1665 	struct hnae3_handle *nic = &vport->nic;
1666 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1667 	struct hclge_dev *hdev = vport->back;
1668 	int ret;
1669 
1670 	kinfo->num_tx_desc = num_tx_desc;
1671 	kinfo->num_rx_desc = num_rx_desc;
1672 
1673 	kinfo->rx_buf_len = hdev->rx_buf_len;
1674 
1675 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1676 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1677 	if (!kinfo->tqp)
1678 		return -ENOMEM;
1679 
1680 	ret = hclge_assign_tqp(vport, num_tqps);
1681 	if (ret)
1682 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1683 
1684 	return ret;
1685 }
1686 
1687 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1688 				  struct hclge_vport *vport)
1689 {
1690 	struct hnae3_handle *nic = &vport->nic;
1691 	struct hnae3_knic_private_info *kinfo;
1692 	u16 i;
1693 
1694 	kinfo = &nic->kinfo;
1695 	for (i = 0; i < vport->alloc_tqps; i++) {
1696 		struct hclge_tqp *q =
1697 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1698 		bool is_pf;
1699 		int ret;
1700 
1701 		is_pf = !(vport->vport_id);
1702 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1703 					     i, is_pf);
1704 		if (ret)
1705 			return ret;
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 static int hclge_map_tqp(struct hclge_dev *hdev)
1712 {
1713 	struct hclge_vport *vport = hdev->vport;
1714 	u16 i, num_vport;
1715 
1716 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1717 	for (i = 0; i < num_vport; i++)	{
1718 		int ret;
1719 
1720 		ret = hclge_map_tqp_to_vport(hdev, vport);
1721 		if (ret)
1722 			return ret;
1723 
1724 		vport++;
1725 	}
1726 
1727 	return 0;
1728 }
1729 
1730 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1731 {
1732 	struct hnae3_handle *nic = &vport->nic;
1733 	struct hclge_dev *hdev = vport->back;
1734 	int ret;
1735 
1736 	nic->pdev = hdev->pdev;
1737 	nic->ae_algo = &ae_algo;
1738 	nic->numa_node_mask = hdev->numa_node_mask;
1739 
1740 	ret = hclge_knic_setup(vport, num_tqps,
1741 			       hdev->num_tx_desc, hdev->num_rx_desc);
1742 	if (ret)
1743 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1744 
1745 	return ret;
1746 }
1747 
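/* One vport is allocated for the PF's own NIC plus one per requested VF
 * (and VMDq instance). TQPs are split evenly across the vports and the
 * remainder goes to the main (PF) vport: e.g. with 16 TQPs and 3 vports,
 * every vport gets 5 TQPs and the main vport gets the extra one (6 total).
 * The numbers here are illustrative only.
 */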
1748 static int hclge_alloc_vport(struct hclge_dev *hdev)
1749 {
1750 	struct pci_dev *pdev = hdev->pdev;
1751 	struct hclge_vport *vport;
1752 	u32 tqp_main_vport;
1753 	u32 tqp_per_vport;
1754 	int num_vport, i;
1755 	int ret;
1756 
1757 	/* We need to alloc a vport for the main NIC of the PF */
1758 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1759 
1760 	if (hdev->num_tqps < num_vport) {
1761 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1762 			hdev->num_tqps, num_vport);
1763 		return -EINVAL;
1764 	}
1765 
1766 	/* Alloc the same number of TQPs for every vport */
1767 	tqp_per_vport = hdev->num_tqps / num_vport;
1768 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1769 
1770 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1771 			     GFP_KERNEL);
1772 	if (!vport)
1773 		return -ENOMEM;
1774 
1775 	hdev->vport = vport;
1776 	hdev->num_alloc_vport = num_vport;
1777 
1778 	if (IS_ENABLED(CONFIG_PCI_IOV))
1779 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1780 
1781 	for (i = 0; i < num_vport; i++) {
1782 		vport->back = hdev;
1783 		vport->vport_id = i;
1784 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1785 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1786 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1787 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1788 		INIT_LIST_HEAD(&vport->vlan_list);
1789 		INIT_LIST_HEAD(&vport->uc_mac_list);
1790 		INIT_LIST_HEAD(&vport->mc_mac_list);
1791 		spin_lock_init(&vport->mac_list_lock);
1792 
1793 		if (i == 0)
1794 			ret = hclge_vport_setup(vport, tqp_main_vport);
1795 		else
1796 			ret = hclge_vport_setup(vport, tqp_per_vport);
1797 		if (ret) {
1798 			dev_err(&pdev->dev,
1799 				"vport setup failed for vport %d, %d\n",
1800 				i, ret);
1801 			return ret;
1802 		}
1803 
1804 		vport++;
1805 	}
1806 
1807 	return 0;
1808 }
1809 
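/* Program the per-TC TX buffer sizes to the firmware. Sizes are passed in
 * 128-byte units (right shift by HCLGE_BUF_SIZE_UNIT_SHIFT) with the
 * update-enable bit set, e.g. a 0x8000-byte buffer is encoded as 0x100.
 */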
1810 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1811 				    struct hclge_pkt_buf_alloc *buf_alloc)
1812 {
1813 /* TX buffer size is in units of 128 bytes */
1814 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1815 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1816 	struct hclge_tx_buff_alloc_cmd *req;
1817 	struct hclge_desc desc;
1818 	int ret;
1819 	u8 i;
1820 
1821 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1822 
1823 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1824 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1825 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1826 
1827 		req->tx_pkt_buff[i] =
1828 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1829 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1830 	}
1831 
1832 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1833 	if (ret)
1834 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1835 			ret);
1836 
1837 	return ret;
1838 }
1839 
1840 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1841 				 struct hclge_pkt_buf_alloc *buf_alloc)
1842 {
1843 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1844 
1845 	if (ret)
1846 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1847 
1848 	return ret;
1849 }
1850 
1851 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1852 {
1853 	unsigned int i;
1854 	u32 cnt = 0;
1855 
1856 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1857 		if (hdev->hw_tc_map & BIT(i))
1858 			cnt++;
1859 	return cnt;
1860 }
1861 
1862 /* Get the number of pfc enabled TCs, which have private buffer */
1863 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1864 				  struct hclge_pkt_buf_alloc *buf_alloc)
1865 {
1866 	struct hclge_priv_buf *priv;
1867 	unsigned int i;
1868 	int cnt = 0;
1869 
1870 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1871 		priv = &buf_alloc->priv_buf[i];
1872 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1873 		    priv->enable)
1874 			cnt++;
1875 	}
1876 
1877 	return cnt;
1878 }
1879 
1880 /* Get the number of pfc disabled TCs, which have private buffer */
1881 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1882 				     struct hclge_pkt_buf_alloc *buf_alloc)
1883 {
1884 	struct hclge_priv_buf *priv;
1885 	unsigned int i;
1886 	int cnt = 0;
1887 
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		priv = &buf_alloc->priv_buf[i];
1890 		if (hdev->hw_tc_map & BIT(i) &&
1891 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1892 		    priv->enable)
1893 			cnt++;
1894 	}
1895 
1896 	return cnt;
1897 }
1898 
1899 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901 	struct hclge_priv_buf *priv;
1902 	u32 rx_priv = 0;
1903 	int i;
1904 
1905 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1906 		priv = &buf_alloc->priv_buf[i];
1907 		if (priv->enable)
1908 			rx_priv += priv->buf_size;
1909 	}
1910 	return rx_priv;
1911 }
1912 
1913 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915 	u32 i, total_tx_size = 0;
1916 
1917 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1918 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1919 
1920 	return total_tx_size;
1921 }
1922 
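/* Check whether the remaining RX packet buffer (@rx_all) can hold all the
 * enabled private buffers plus the required shared buffer. If it fits, the
 * leftover space becomes the shared buffer and its self/per-TC high and low
 * waterlines are derived here, aligned to HCLGE_BUF_SIZE_UNIT.
 */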
1923 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1924 				struct hclge_pkt_buf_alloc *buf_alloc,
1925 				u32 rx_all)
1926 {
1927 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1928 	u32 tc_num = hclge_get_tc_num(hdev);
1929 	u32 shared_buf, aligned_mps;
1930 	u32 rx_priv;
1931 	int i;
1932 
1933 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1934 
1935 	if (hnae3_dev_dcb_supported(hdev))
1936 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1937 					hdev->dv_buf_size;
1938 	else
1939 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1940 					+ hdev->dv_buf_size;
1941 
1942 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1943 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1944 			     HCLGE_BUF_SIZE_UNIT);
1945 
1946 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1947 	if (rx_all < rx_priv + shared_std)
1948 		return false;
1949 
1950 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1951 	buf_alloc->s_buf.buf_size = shared_buf;
1952 	if (hnae3_dev_dcb_supported(hdev)) {
1953 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1954 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1955 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1956 				  HCLGE_BUF_SIZE_UNIT);
1957 	} else {
1958 		buf_alloc->s_buf.self.high = aligned_mps +
1959 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1960 		buf_alloc->s_buf.self.low = aligned_mps;
1961 	}
1962 
1963 	if (hnae3_dev_dcb_supported(hdev)) {
1964 		hi_thrd = shared_buf - hdev->dv_buf_size;
1965 
1966 		if (tc_num <= NEED_RESERVE_TC_NUM)
1967 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1968 					/ BUF_MAX_PERCENT;
1969 
1970 		if (tc_num)
1971 			hi_thrd = hi_thrd / tc_num;
1972 
1973 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1974 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1975 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1976 	} else {
1977 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1978 		lo_thrd = aligned_mps;
1979 	}
1980 
1981 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1982 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1983 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1984 	}
1985 
1986 	return true;
1987 }
1988 
1989 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1990 				struct hclge_pkt_buf_alloc *buf_alloc)
1991 {
1992 	u32 i, total_size;
1993 
1994 	total_size = hdev->pkt_buf_size;
1995 
1996 	/* alloc tx buffer for all enabled tc */
1997 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1998 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1999 
2000 		if (hdev->hw_tc_map & BIT(i)) {
2001 			if (total_size < hdev->tx_buf_size)
2002 				return -ENOMEM;
2003 
2004 			priv->tx_buf_size = hdev->tx_buf_size;
2005 		} else {
2006 			priv->tx_buf_size = 0;
2007 		}
2008 
2009 		total_size -= priv->tx_buf_size;
2010 	}
2011 
2012 	return 0;
2013 }
2014 
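/* Assign a private RX buffer and waterlines to every enabled TC (PFC TCs
 * get a non-zero low waterline; @max selects the larger waterline scheme),
 * then verify that the resulting layout still fits via hclge_is_rx_buf_ok().
 */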
2015 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2016 				  struct hclge_pkt_buf_alloc *buf_alloc)
2017 {
2018 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2019 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2020 	unsigned int i;
2021 
2022 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2023 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2024 
2025 		priv->enable = 0;
2026 		priv->wl.low = 0;
2027 		priv->wl.high = 0;
2028 		priv->buf_size = 0;
2029 
2030 		if (!(hdev->hw_tc_map & BIT(i)))
2031 			continue;
2032 
2033 		priv->enable = 1;
2034 
2035 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2036 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2037 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2038 						HCLGE_BUF_SIZE_UNIT);
2039 		} else {
2040 			priv->wl.low = 0;
2041 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2042 					aligned_mps;
2043 		}
2044 
2045 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2046 	}
2047 
2048 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2049 }
2050 
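/* Shrink the layout by clearing the private buffers of non-PFC TCs,
 * starting from the highest TC, until the remaining RX buffer fits.
 * hclge_drop_pfc_buf_till_fit() below does the same for PFC TCs.
 */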
2051 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2052 					  struct hclge_pkt_buf_alloc *buf_alloc)
2053 {
2054 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2055 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2056 	int i;
2057 
2058 	/* let the last one be cleared first */
2059 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2060 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2061 		unsigned int mask = BIT((unsigned int)i);
2062 
2063 		if (hdev->hw_tc_map & mask &&
2064 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2065 			/* Clear the no pfc TC private buffer */
2066 			priv->wl.low = 0;
2067 			priv->wl.high = 0;
2068 			priv->buf_size = 0;
2069 			priv->enable = 0;
2070 			no_pfc_priv_num--;
2071 		}
2072 
2073 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2074 		    no_pfc_priv_num == 0)
2075 			break;
2076 	}
2077 
2078 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2079 }
2080 
2081 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2082 					struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2086 	int i;
2087 
2088 	/* let the last one be cleared first */
2089 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2090 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2091 		unsigned int mask = BIT((unsigned int)i);
2092 
2093 		if (hdev->hw_tc_map & mask &&
2094 		    hdev->tm_info.hw_pfc_map & mask) {
2095 			/* Reduce the number of pfc TCs with private buffers */
2096 			priv->wl.low = 0;
2097 			priv->enable = 0;
2098 			priv->wl.high = 0;
2099 			priv->buf_size = 0;
2100 			pfc_priv_num--;
2101 		}
2102 
2103 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2104 		    pfc_priv_num == 0)
2105 			break;
2106 	}
2107 
2108 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2109 }
2110 
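/* Preferred layout when DCB is supported: split the whole remaining RX
 * buffer into private buffers for the enabled TCs and use no shared buffer.
 * A reserve margin is kept when only a few TCs are enabled; the layout is
 * rejected if the per-TC share drops below the minimum private buffer size.
 */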
2111 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2112 				      struct hclge_pkt_buf_alloc *buf_alloc)
2113 {
2114 #define COMPENSATE_BUFFER	0x3C00
2115 #define COMPENSATE_HALF_MPS_NUM	5
2116 #define PRIV_WL_GAP		0x1800
2117 
2118 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2119 	u32 tc_num = hclge_get_tc_num(hdev);
2120 	u32 half_mps = hdev->mps >> 1;
2121 	u32 min_rx_priv;
2122 	unsigned int i;
2123 
2124 	if (tc_num)
2125 		rx_priv = rx_priv / tc_num;
2126 
2127 	if (tc_num <= NEED_RESERVE_TC_NUM)
2128 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2129 
2130 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2131 			COMPENSATE_HALF_MPS_NUM * half_mps;
2132 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2133 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2134 
2135 	if (rx_priv < min_rx_priv)
2136 		return false;
2137 
2138 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2139 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2140 
2141 		priv->enable = 0;
2142 		priv->wl.low = 0;
2143 		priv->wl.high = 0;
2144 		priv->buf_size = 0;
2145 
2146 		if (!(hdev->hw_tc_map & BIT(i)))
2147 			continue;
2148 
2149 		priv->enable = 1;
2150 		priv->buf_size = rx_priv;
2151 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2152 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2153 	}
2154 
2155 	buf_alloc->s_buf.buf_size = 0;
2156 
2157 	return true;
2158 }
2159 
2160 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2161  * @hdev: pointer to struct hclge_dev
2162  * @buf_alloc: pointer to buffer calculation data
2163  * @return: 0: calculation successful, negative: fail
2164  */
2165 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2166 				struct hclge_pkt_buf_alloc *buf_alloc)
2167 {
2168 	/* When DCB is not supported, rx private buffer is not allocated. */
2169 	if (!hnae3_dev_dcb_supported(hdev)) {
2170 		u32 rx_all = hdev->pkt_buf_size;
2171 
2172 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2173 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2174 			return -ENOMEM;
2175 
2176 		return 0;
2177 	}
2178 
2179 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2180 		return 0;
2181 
2182 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2183 		return 0;
2184 
2185 	/* try to decrease the buffer size */
2186 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2187 		return 0;
2188 
2189 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2190 		return 0;
2191 
2192 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2193 		return 0;
2194 
2195 	return -ENOMEM;
2196 }
2197 
2198 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2199 				   struct hclge_pkt_buf_alloc *buf_alloc)
2200 {
2201 	struct hclge_rx_priv_buff_cmd *req;
2202 	struct hclge_desc desc;
2203 	int ret;
2204 	int i;
2205 
2206 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2207 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2208 
2209 	/* Alloc private buffer TCs */
2210 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2211 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2212 
2213 		req->buf_num[i] =
2214 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2215 		req->buf_num[i] |=
2216 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2217 	}
2218 
2219 	req->shared_buf =
2220 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2221 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2222 
2223 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2224 	if (ret)
2225 		dev_err(&hdev->pdev->dev,
2226 			"rx private buffer alloc cmd failed %d\n", ret);
2227 
2228 	return ret;
2229 }
2230 
2231 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2232 				   struct hclge_pkt_buf_alloc *buf_alloc)
2233 {
2234 	struct hclge_rx_priv_wl_buf *req;
2235 	struct hclge_priv_buf *priv;
2236 	struct hclge_desc desc[2];
2237 	int i, j;
2238 	int ret;
2239 
2240 	for (i = 0; i < 2; i++) {
2241 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2242 					   false);
2243 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2244 
2245 		/* The first descriptor sets the NEXT bit to 1 */
2246 		if (i == 0)
2247 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2248 		else
2249 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2250 
2251 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2252 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2253 
2254 			priv = &buf_alloc->priv_buf[idx];
2255 			req->tc_wl[j].high =
2256 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2257 			req->tc_wl[j].high |=
2258 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2259 			req->tc_wl[j].low =
2260 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2261 			req->tc_wl[j].low |=
2262 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2263 		}
2264 	}
2265 
2266 	/* Send 2 descriptors at one time */
2267 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2268 	if (ret)
2269 		dev_err(&hdev->pdev->dev,
2270 			"rx private waterline config cmd failed %d\n",
2271 			ret);
2272 	return ret;
2273 }
2274 
2275 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2276 				    struct hclge_pkt_buf_alloc *buf_alloc)
2277 {
2278 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2279 	struct hclge_rx_com_thrd *req;
2280 	struct hclge_desc desc[2];
2281 	struct hclge_tc_thrd *tc;
2282 	int i, j;
2283 	int ret;
2284 
2285 	for (i = 0; i < 2; i++) {
2286 		hclge_cmd_setup_basic_desc(&desc[i],
2287 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2288 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2289 
2290 		/* The first descriptor sets the NEXT bit to 1 */
2291 		if (i == 0)
2292 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2293 		else
2294 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2295 
2296 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2297 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2298 
2299 			req->com_thrd[j].high =
2300 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2301 			req->com_thrd[j].high |=
2302 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2303 			req->com_thrd[j].low =
2304 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2305 			req->com_thrd[j].low |=
2306 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2307 		}
2308 	}
2309 
2310 	/* Send 2 descriptors at one time */
2311 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2312 	if (ret)
2313 		dev_err(&hdev->pdev->dev,
2314 			"common threshold config cmd failed %d\n", ret);
2315 	return ret;
2316 }
2317 
2318 static int hclge_common_wl_config(struct hclge_dev *hdev,
2319 				  struct hclge_pkt_buf_alloc *buf_alloc)
2320 {
2321 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2322 	struct hclge_rx_com_wl *req;
2323 	struct hclge_desc desc;
2324 	int ret;
2325 
2326 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2327 
2328 	req = (struct hclge_rx_com_wl *)desc.data;
2329 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2330 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 
2332 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2333 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2334 
2335 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2336 	if (ret)
2337 		dev_err(&hdev->pdev->dev,
2338 			"common waterline config cmd failed %d\n", ret);
2339 
2340 	return ret;
2341 }
2342 
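/* Top-level packet buffer setup: calculate and program the TX buffers,
 * then the RX private buffers; on DCB-capable devices also configure the
 * RX private waterlines and the common thresholds, and finally the common
 * (shared buffer) waterline.
 */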
2343 int hclge_buffer_alloc(struct hclge_dev *hdev)
2344 {
2345 	struct hclge_pkt_buf_alloc *pkt_buf;
2346 	int ret;
2347 
2348 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2349 	if (!pkt_buf)
2350 		return -ENOMEM;
2351 
2352 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2353 	if (ret) {
2354 		dev_err(&hdev->pdev->dev,
2355 			"could not calc tx buffer size for all TCs %d\n", ret);
2356 		goto out;
2357 	}
2358 
2359 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2360 	if (ret) {
2361 		dev_err(&hdev->pdev->dev,
2362 			"could not alloc tx buffers %d\n", ret);
2363 		goto out;
2364 	}
2365 
2366 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2367 	if (ret) {
2368 		dev_err(&hdev->pdev->dev,
2369 			"could not calc rx priv buffer size for all TCs %d\n",
2370 			ret);
2371 		goto out;
2372 	}
2373 
2374 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2375 	if (ret) {
2376 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2377 			ret);
2378 		goto out;
2379 	}
2380 
2381 	if (hnae3_dev_dcb_supported(hdev)) {
2382 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2383 		if (ret) {
2384 			dev_err(&hdev->pdev->dev,
2385 				"could not configure rx private waterline %d\n",
2386 				ret);
2387 			goto out;
2388 		}
2389 
2390 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2391 		if (ret) {
2392 			dev_err(&hdev->pdev->dev,
2393 				"could not configure common threshold %d\n",
2394 				ret);
2395 			goto out;
2396 		}
2397 	}
2398 
2399 	ret = hclge_common_wl_config(hdev, pkt_buf);
2400 	if (ret)
2401 		dev_err(&hdev->pdev->dev,
2402 			"could not configure common waterline %d\n", ret);
2403 
2404 out:
2405 	kfree(pkt_buf);
2406 	return ret;
2407 }
2408 
2409 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2410 {
2411 	struct hnae3_handle *roce = &vport->roce;
2412 	struct hnae3_handle *nic = &vport->nic;
2413 
2414 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2415 
2416 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2417 	    vport->back->num_msi_left == 0)
2418 		return -EINVAL;
2419 
2420 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2421 
2422 	roce->rinfo.netdev = nic->kinfo.netdev;
2423 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2424 
2425 	roce->pdev = nic->pdev;
2426 	roce->ae_algo = nic->ae_algo;
2427 	roce->numa_node_mask = nic->numa_node_mask;
2428 
2429 	return 0;
2430 }
2431 
2432 static int hclge_init_msi(struct hclge_dev *hdev)
2433 {
2434 	struct pci_dev *pdev = hdev->pdev;
2435 	int vectors;
2436 	int i;
2437 
2438 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2439 					hdev->num_msi,
2440 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2441 	if (vectors < 0) {
2442 		dev_err(&pdev->dev,
2443 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2444 			vectors);
2445 		return vectors;
2446 	}
2447 	if (vectors < hdev->num_msi)
2448 		dev_warn(&hdev->pdev->dev,
2449 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2450 			 hdev->num_msi, vectors);
2451 
2452 	hdev->num_msi = vectors;
2453 	hdev->num_msi_left = vectors;
2454 
2455 	hdev->base_msi_vector = pdev->irq;
2456 	hdev->roce_base_vector = hdev->base_msi_vector +
2457 				hdev->roce_base_msix_offset;
2458 
2459 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2460 					   sizeof(u16), GFP_KERNEL);
2461 	if (!hdev->vector_status) {
2462 		pci_free_irq_vectors(pdev);
2463 		return -ENOMEM;
2464 	}
2465 
2466 	for (i = 0; i < hdev->num_msi; i++)
2467 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2468 
2469 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2470 					sizeof(int), GFP_KERNEL);
2471 	if (!hdev->vector_irq) {
2472 		pci_free_irq_vectors(pdev);
2473 		return -ENOMEM;
2474 	}
2475 
2476 	return 0;
2477 }
2478 
2479 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2480 {
2481 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2482 		duplex = HCLGE_MAC_FULL;
2483 
2484 	return duplex;
2485 }
2486 
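/* Firmware encoding of the speed field written below:
 * 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5,
 * 10M = 6, 100M = 7, 200G = 8.
 */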
2487 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2488 				      u8 duplex)
2489 {
2490 	struct hclge_config_mac_speed_dup_cmd *req;
2491 	struct hclge_desc desc;
2492 	int ret;
2493 
2494 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2495 
2496 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2497 
2498 	if (duplex)
2499 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2500 
2501 	switch (speed) {
2502 	case HCLGE_MAC_SPEED_10M:
2503 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2504 				HCLGE_CFG_SPEED_S, 6);
2505 		break;
2506 	case HCLGE_MAC_SPEED_100M:
2507 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2508 				HCLGE_CFG_SPEED_S, 7);
2509 		break;
2510 	case HCLGE_MAC_SPEED_1G:
2511 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2512 				HCLGE_CFG_SPEED_S, 0);
2513 		break;
2514 	case HCLGE_MAC_SPEED_10G:
2515 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2516 				HCLGE_CFG_SPEED_S, 1);
2517 		break;
2518 	case HCLGE_MAC_SPEED_25G:
2519 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2520 				HCLGE_CFG_SPEED_S, 2);
2521 		break;
2522 	case HCLGE_MAC_SPEED_40G:
2523 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2524 				HCLGE_CFG_SPEED_S, 3);
2525 		break;
2526 	case HCLGE_MAC_SPEED_50G:
2527 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2528 				HCLGE_CFG_SPEED_S, 4);
2529 		break;
2530 	case HCLGE_MAC_SPEED_100G:
2531 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2532 				HCLGE_CFG_SPEED_S, 5);
2533 		break;
2534 	case HCLGE_MAC_SPEED_200G:
2535 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2536 				HCLGE_CFG_SPEED_S, 8);
2537 		break;
2538 	default:
2539 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2540 		return -EINVAL;
2541 	}
2542 
2543 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2544 		      1);
2545 
2546 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547 	if (ret) {
2548 		dev_err(&hdev->pdev->dev,
2549 			"mac speed/duplex config cmd failed %d.\n", ret);
2550 		return ret;
2551 	}
2552 
2553 	return 0;
2554 }
2555 
2556 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2557 {
2558 	struct hclge_mac *mac = &hdev->hw.mac;
2559 	int ret;
2560 
2561 	duplex = hclge_check_speed_dup(duplex, speed);
2562 	if (!mac->support_autoneg && mac->speed == speed &&
2563 	    mac->duplex == duplex)
2564 		return 0;
2565 
2566 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2567 	if (ret)
2568 		return ret;
2569 
2570 	hdev->hw.mac.speed = speed;
2571 	hdev->hw.mac.duplex = duplex;
2572 
2573 	return 0;
2574 }
2575 
2576 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2577 				     u8 duplex)
2578 {
2579 	struct hclge_vport *vport = hclge_get_vport(handle);
2580 	struct hclge_dev *hdev = vport->back;
2581 
2582 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2583 }
2584 
2585 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2586 {
2587 	struct hclge_config_auto_neg_cmd *req;
2588 	struct hclge_desc desc;
2589 	u32 flag = 0;
2590 	int ret;
2591 
2592 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2593 
2594 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2595 	if (enable)
2596 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2597 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2598 
2599 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2600 	if (ret)
2601 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2602 			ret);
2603 
2604 	return ret;
2605 }
2606 
2607 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2608 {
2609 	struct hclge_vport *vport = hclge_get_vport(handle);
2610 	struct hclge_dev *hdev = vport->back;
2611 
2612 	if (!hdev->hw.mac.support_autoneg) {
2613 		if (enable) {
2614 			dev_err(&hdev->pdev->dev,
2615 				"autoneg is not supported by current port\n");
2616 			return -EOPNOTSUPP;
2617 		} else {
2618 			return 0;
2619 		}
2620 	}
2621 
2622 	return hclge_set_autoneg_en(hdev, enable);
2623 }
2624 
2625 static int hclge_get_autoneg(struct hnae3_handle *handle)
2626 {
2627 	struct hclge_vport *vport = hclge_get_vport(handle);
2628 	struct hclge_dev *hdev = vport->back;
2629 	struct phy_device *phydev = hdev->hw.mac.phydev;
2630 
2631 	if (phydev)
2632 		return phydev->autoneg;
2633 
2634 	return hdev->hw.mac.autoneg;
2635 }
2636 
2637 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2638 {
2639 	struct hclge_vport *vport = hclge_get_vport(handle);
2640 	struct hclge_dev *hdev = vport->back;
2641 	int ret;
2642 
2643 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2644 
2645 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2646 	if (ret)
2647 		return ret;
2648 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2649 }
2650 
2651 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2652 {
2653 	struct hclge_vport *vport = hclge_get_vport(handle);
2654 	struct hclge_dev *hdev = vport->back;
2655 
2656 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2657 		return hclge_set_autoneg_en(hdev, !halt);
2658 
2659 	return 0;
2660 }
2661 
2662 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2663 {
2664 	struct hclge_config_fec_cmd *req;
2665 	struct hclge_desc desc;
2666 	int ret;
2667 
2668 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2669 
2670 	req = (struct hclge_config_fec_cmd *)desc.data;
2671 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2672 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2673 	if (fec_mode & BIT(HNAE3_FEC_RS))
2674 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2675 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2676 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2677 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2678 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2679 
2680 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2681 	if (ret)
2682 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2683 
2684 	return ret;
2685 }
2686 
2687 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2688 {
2689 	struct hclge_vport *vport = hclge_get_vport(handle);
2690 	struct hclge_dev *hdev = vport->back;
2691 	struct hclge_mac *mac = &hdev->hw.mac;
2692 	int ret;
2693 
2694 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2695 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2696 		return -EINVAL;
2697 	}
2698 
2699 	ret = hclge_set_fec_hw(hdev, fec_mode);
2700 	if (ret)
2701 		return ret;
2702 
2703 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2704 	return 0;
2705 }
2706 
2707 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2708 			  u8 *fec_mode)
2709 {
2710 	struct hclge_vport *vport = hclge_get_vport(handle);
2711 	struct hclge_dev *hdev = vport->back;
2712 	struct hclge_mac *mac = &hdev->hw.mac;
2713 
2714 	if (fec_ability)
2715 		*fec_ability = mac->fec_ability;
2716 	if (fec_mode)
2717 		*fec_mode = mac->fec_mode;
2718 }
2719 
2720 static int hclge_mac_init(struct hclge_dev *hdev)
2721 {
2722 	struct hclge_mac *mac = &hdev->hw.mac;
2723 	int ret;
2724 
2725 	hdev->support_sfp_query = true;
2726 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2727 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2728 					 hdev->hw.mac.duplex);
2729 	if (ret)
2730 		return ret;
2731 
2732 	if (hdev->hw.mac.support_autoneg) {
2733 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2734 		if (ret)
2735 			return ret;
2736 	}
2737 
2738 	mac->link = 0;
2739 
2740 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2741 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2742 		if (ret)
2743 			return ret;
2744 	}
2745 
2746 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2747 	if (ret) {
2748 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2749 		return ret;
2750 	}
2751 
2752 	ret = hclge_set_default_loopback(hdev);
2753 	if (ret)
2754 		return ret;
2755 
2756 	ret = hclge_buffer_alloc(hdev);
2757 	if (ret)
2758 		dev_err(&hdev->pdev->dev,
2759 			"allocate buffer fail, ret=%d\n", ret);
2760 
2761 	return ret;
2762 }
2763 
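/* The three schedule helpers below queue the per-device service task on
 * hclge_wq, pinned to the first CPU in the device's affinity mask. The
 * mailbox and reset variants also set a state bit so the service task
 * knows which work to process.
 */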
2764 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2765 {
2766 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2767 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2768 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2769 				    hclge_wq, &hdev->service_task, 0);
2770 }
2771 
2772 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2773 {
2774 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2775 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2776 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2777 				    hclge_wq, &hdev->service_task, 0);
2778 }
2779 
2780 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2781 {
2782 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2783 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2784 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2785 				    hclge_wq, &hdev->service_task,
2786 				    delay_time);
2787 }
2788 
2789 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2790 {
2791 	struct hclge_link_status_cmd *req;
2792 	struct hclge_desc desc;
2793 	int ret;
2794 
2795 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2796 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2797 	if (ret) {
2798 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2799 			ret);
2800 		return ret;
2801 	}
2802 
2803 	req = (struct hclge_link_status_cmd *)desc.data;
2804 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2805 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2806 
2807 	return 0;
2808 }
2809 
2810 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2811 {
2812 	struct phy_device *phydev = hdev->hw.mac.phydev;
2813 
2814 	*link_status = HCLGE_LINK_STATUS_DOWN;
2815 
2816 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2817 		return 0;
2818 
2819 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2820 		return 0;
2821 
2822 	return hclge_get_mac_link_status(hdev, link_status);
2823 }
2824 
2825 static void hclge_update_link_status(struct hclge_dev *hdev)
2826 {
2827 	struct hnae3_client *rclient = hdev->roce_client;
2828 	struct hnae3_client *client = hdev->nic_client;
2829 	struct hnae3_handle *rhandle;
2830 	struct hnae3_handle *handle;
2831 	int state;
2832 	int ret;
2833 	int i;
2834 
2835 	if (!client)
2836 		return;
2837 
2838 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2839 		return;
2840 
2841 	ret = hclge_get_mac_phy_link(hdev, &state);
2842 	if (ret) {
2843 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2844 		return;
2845 	}
2846 
2847 	if (state != hdev->hw.mac.link) {
2848 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2849 			handle = &hdev->vport[i].nic;
2850 			client->ops->link_status_change(handle, state);
2851 			hclge_config_mac_tnl_int(hdev, state);
2852 			rhandle = &hdev->vport[i].roce;
2853 			if (rclient && rclient->ops->link_status_change)
2854 				rclient->ops->link_status_change(rhandle,
2855 								 state);
2856 		}
2857 		hdev->hw.mac.link = state;
2858 	}
2859 
2860 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2861 }
2862 
2863 static void hclge_update_port_capability(struct hclge_mac *mac)
2864 {
2865 	/* update fec ability by speed */
2866 	hclge_convert_setting_fec(mac);
2867 
2868 	/* firmware cannot identify the backplane type; the media type
2869 	 * read from the configuration can help deal with it
2870 	 */
2871 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2872 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2873 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2874 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2875 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2876 
2877 	if (mac->support_autoneg) {
2878 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2879 		linkmode_copy(mac->advertising, mac->supported);
2880 	} else {
2881 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2882 				   mac->supported);
2883 		linkmode_zero(mac->advertising);
2884 	}
2885 }
2886 
2887 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2888 {
2889 	struct hclge_sfp_info_cmd *resp;
2890 	struct hclge_desc desc;
2891 	int ret;
2892 
2893 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2894 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2895 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2896 	if (ret == -EOPNOTSUPP) {
2897 		dev_warn(&hdev->pdev->dev,
2898 			 "IMP does not support getting SFP speed %d\n", ret);
2899 		return ret;
2900 	} else if (ret) {
2901 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2902 		return ret;
2903 	}
2904 
2905 	*speed = le32_to_cpu(resp->speed);
2906 
2907 	return 0;
2908 }
2909 
2910 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2911 {
2912 	struct hclge_sfp_info_cmd *resp;
2913 	struct hclge_desc desc;
2914 	int ret;
2915 
2916 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2917 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2918 
2919 	resp->query_type = QUERY_ACTIVE_SPEED;
2920 
2921 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2922 	if (ret == -EOPNOTSUPP) {
2923 		dev_warn(&hdev->pdev->dev,
2924 			 "IMP does not support getting SFP info %d\n", ret);
2925 		return ret;
2926 	} else if (ret) {
2927 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2928 		return ret;
2929 	}
2930 
2931 	/* In some cases, the mac speed reported by IMP may be 0; it shouldn't be
2932 	 * set to mac->speed.
2933 	 */
2934 	if (!le32_to_cpu(resp->speed))
2935 		return 0;
2936 
2937 	mac->speed = le32_to_cpu(resp->speed);
2938 	/* if resp->speed_ability is 0, it means it's an old firmware
2939 	 * version, so do not update these params
2940 	 */
2941 	if (resp->speed_ability) {
2942 		mac->module_type = le32_to_cpu(resp->module_type);
2943 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2944 		mac->autoneg = resp->autoneg;
2945 		mac->support_autoneg = resp->autoneg_ability;
2946 		mac->speed_type = QUERY_ACTIVE_SPEED;
2947 		if (!resp->active_fec)
2948 			mac->fec_mode = 0;
2949 		else
2950 			mac->fec_mode = BIT(resp->active_fec);
2951 	} else {
2952 		mac->speed_type = QUERY_SFP_SPEED;
2953 	}
2954 
2955 	return 0;
2956 }
2957 
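/* Refresh speed/FEC information for non-copper ports. On V2+ firmware the
 * full SFP info is queried and the port capability updated; older firmware
 * only reports the SFP speed, which is then applied with full duplex.
 * Further queries are disabled if the IMP returns -EOPNOTSUPP.
 */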
2958 static int hclge_update_port_info(struct hclge_dev *hdev)
2959 {
2960 	struct hclge_mac *mac = &hdev->hw.mac;
2961 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2962 	int ret;
2963 
2964 	/* get the port info from SFP cmd if not copper port */
2965 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2966 		return 0;
2967 
2968 	/* if IMP does not support getting SFP/qSFP info, return directly */
2969 	if (!hdev->support_sfp_query)
2970 		return 0;
2971 
2972 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2973 		ret = hclge_get_sfp_info(hdev, mac);
2974 	else
2975 		ret = hclge_get_sfp_speed(hdev, &speed);
2976 
2977 	if (ret == -EOPNOTSUPP) {
2978 		hdev->support_sfp_query = false;
2979 		return ret;
2980 	} else if (ret) {
2981 		return ret;
2982 	}
2983 
2984 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2985 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2986 			hclge_update_port_capability(mac);
2987 			return 0;
2988 		}
2989 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2990 					       HCLGE_MAC_FULL);
2991 	} else {
2992 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2993 			return 0; /* do nothing if no SFP */
2994 
2995 		/* must config full duplex for SFP */
2996 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2997 	}
2998 }
2999 
3000 static int hclge_get_status(struct hnae3_handle *handle)
3001 {
3002 	struct hclge_vport *vport = hclge_get_vport(handle);
3003 	struct hclge_dev *hdev = vport->back;
3004 
3005 	hclge_update_link_status(hdev);
3006 
3007 	return hdev->hw.mac.link;
3008 }
3009 
3010 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3011 {
3012 	if (!pci_num_vf(hdev->pdev)) {
3013 		dev_err(&hdev->pdev->dev,
3014 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3015 		return NULL;
3016 	}
3017 
3018 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3019 		dev_err(&hdev->pdev->dev,
3020 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3021 			vf, pci_num_vf(hdev->pdev));
3022 		return NULL;
3023 	}
3024 
3025 	/* VFs start from 1 in vport */
3026 	vf += HCLGE_VF_VPORT_START_NUM;
3027 	return &hdev->vport[vf];
3028 }
3029 
3030 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3031 			       struct ifla_vf_info *ivf)
3032 {
3033 	struct hclge_vport *vport = hclge_get_vport(handle);
3034 	struct hclge_dev *hdev = vport->back;
3035 
3036 	vport = hclge_get_vf_vport(hdev, vf);
3037 	if (!vport)
3038 		return -EINVAL;
3039 
3040 	ivf->vf = vf;
3041 	ivf->linkstate = vport->vf_info.link_state;
3042 	ivf->spoofchk = vport->vf_info.spoofchk;
3043 	ivf->trusted = vport->vf_info.trusted;
3044 	ivf->min_tx_rate = 0;
3045 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3046 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3047 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3048 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3049 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3050 
3051 	return 0;
3052 }
3053 
3054 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3055 				   int link_state)
3056 {
3057 	struct hclge_vport *vport = hclge_get_vport(handle);
3058 	struct hclge_dev *hdev = vport->back;
3059 
3060 	vport = hclge_get_vf_vport(hdev, vf);
3061 	if (!vport)
3062 		return -EINVAL;
3063 
3064 	vport->vf_info.link_state = link_state;
3065 
3066 	return 0;
3067 }
3068 
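/* Decode the vector0 interrupt source in priority order: IMP reset,
 * global reset, MSI-X hardware error, then mailbox (CMDQ RX); anything
 * else is reported as "other". *clearval is set to the bits the caller
 * must clear for the decoded event.
 */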
3069 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3070 {
3071 	u32 cmdq_src_reg, msix_src_reg;
3072 
3073 	/* fetch the events from their corresponding regs */
3074 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3075 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3076 
3077 	/* Assumption: if reset and mailbox events are reported together,
3078 	 * we only process the reset event in this pass and defer the
3079 	 * processing of the mailbox events. Since we have not cleared the
3080 	 * RX CMDQ event this time, we will receive another interrupt from
3081 	 * the H/W just for the mailbox.
3082 	 *
3083 	 * check for vector0 reset event sources
3084 	 */
3085 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3086 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3087 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3088 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3089 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3090 		hdev->rst_stats.imp_rst_cnt++;
3091 		return HCLGE_VECTOR0_EVENT_RST;
3092 	}
3093 
3094 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3095 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3096 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3097 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3098 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3099 		hdev->rst_stats.global_rst_cnt++;
3100 		return HCLGE_VECTOR0_EVENT_RST;
3101 	}
3102 
3103 	/* check for vector0 msix event source */
3104 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3105 		*clearval = msix_src_reg;
3106 		return HCLGE_VECTOR0_EVENT_ERR;
3107 	}
3108 
3109 	/* check for vector0 mailbox(=CMDQ RX) event source */
3110 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3111 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3112 		*clearval = cmdq_src_reg;
3113 		return HCLGE_VECTOR0_EVENT_MBX;
3114 	}
3115 
3116 	/* print other vector0 event source */
3117 	dev_info(&hdev->pdev->dev,
3118 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3119 		 cmdq_src_reg, msix_src_reg);
3120 	*clearval = msix_src_reg;
3121 
3122 	return HCLGE_VECTOR0_EVENT_OTHER;
3123 }
3124 
3125 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3126 				    u32 regclr)
3127 {
3128 #define HCLGE_IMP_RESET_DELAY		5
3129 
3130 	switch (event_type) {
3131 	case HCLGE_VECTOR0_EVENT_RST:
3132 		if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3133 			mdelay(HCLGE_IMP_RESET_DELAY);
3134 
3135 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3136 		break;
3137 	case HCLGE_VECTOR0_EVENT_MBX:
3138 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3139 		break;
3140 	default:
3141 		break;
3142 	}
3143 }
3144 
3145 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3146 {
3147 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3148 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3149 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3150 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3151 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3152 }
3153 
3154 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3155 {
3156 	writel(enable ? 1 : 0, vector->addr);
3157 }
3158 
3159 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3160 {
3161 	struct hclge_dev *hdev = data;
3162 	u32 clearval = 0;
3163 	u32 event_cause;
3164 
3165 	hclge_enable_vector(&hdev->misc_vector, false);
3166 	event_cause = hclge_check_event_cause(hdev, &clearval);
3167 
3168 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3169 	switch (event_cause) {
3170 	case HCLGE_VECTOR0_EVENT_ERR:
3171 		/* we do not know what type of reset is required now. This could
3172 		 * only be decided after we fetch the type of errors which
3173 		 * caused this event. Therefore, we will do below for now:
3174 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3175 		 *    have deferred the type of reset to be used.
3176 		 * 2. Schedule the reset service task.
3177 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3178 		 *    will fetch the correct type of reset. This would be done
3179 		 *    by first decoding the types of errors.
3180 		 */
3181 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3182 		fallthrough;
3183 	case HCLGE_VECTOR0_EVENT_RST:
3184 		hclge_reset_task_schedule(hdev);
3185 		break;
3186 	case HCLGE_VECTOR0_EVENT_MBX:
3187 		/* If we are here then,
3188 		 * 1. Either we are not handling any mbx task and we are not
3189 		 *    scheduled as well
3190 		 *                        OR
3191 		 * 2. We could be handling a mbx task but nothing more is
3192 		 *    scheduled.
3193 		 * In both cases, we should schedule mbx task as there are more
3194 		 * mbx messages reported by this interrupt.
3195 		 */
3196 		hclge_mbx_task_schedule(hdev);
3197 		break;
3198 	default:
3199 		dev_warn(&hdev->pdev->dev,
3200 			 "received unknown or unhandled event of vector0\n");
3201 		break;
3202 	}
3203 
3204 	hclge_clear_event_cause(hdev, event_cause, clearval);
3205 
3206 	/* Enable the interrupt if it is not caused by reset. When
3207 	 * clearval is equal to 0, it means the interrupt status may be
3208 	 * cleared by hardware before the driver reads the status register.
3209 	 * For this case, the vector0 interrupt should also be enabled.
3210 	 */
3211 	if (!clearval ||
3212 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3213 		hclge_enable_vector(&hdev->misc_vector, true);
3214 	}
3215 
3216 	return IRQ_HANDLED;
3217 }
3218 
3219 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3220 {
3221 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3222 		dev_warn(&hdev->pdev->dev,
3223 			 "vector(vector_id %d) has been freed.\n", vector_id);
3224 		return;
3225 	}
3226 
3227 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3228 	hdev->num_msi_left += 1;
3229 	hdev->num_msi_used -= 1;
3230 }
3231 
3232 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3233 {
3234 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3235 
3236 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3237 
3238 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3239 	hdev->vector_status[0] = 0;
3240 
3241 	hdev->num_msi_left -= 1;
3242 	hdev->num_msi_used += 1;
3243 }
3244 
3245 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3246 				      const cpumask_t *mask)
3247 {
3248 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3249 					      affinity_notify);
3250 
3251 	cpumask_copy(&hdev->affinity_mask, mask);
3252 }
3253 
3254 static void hclge_irq_affinity_release(struct kref *ref)
3255 {
3256 }
3257 
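/* Pin the misc (vector0) IRQ to hdev->affinity_mask (set from the device's
 * NUMA node) and register an affinity notifier so the mask tracks later
 * changes; the task-schedule helpers above use this mask when queuing work.
 */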
3258 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3259 {
3260 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3261 			      &hdev->affinity_mask);
3262 
3263 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3264 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3265 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3266 				  &hdev->affinity_notify);
3267 }
3268 
3269 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3270 {
3271 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3272 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3273 }
3274 
3275 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3276 {
3277 	int ret;
3278 
3279 	hclge_get_misc_vector(hdev);
3280 
3281 	/* this would be explicitly freed in the end */
3282 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3283 		 HCLGE_NAME, pci_name(hdev->pdev));
3284 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3285 			  0, hdev->misc_vector.name, hdev);
3286 	if (ret) {
3287 		hclge_free_vector(hdev, 0);
3288 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3289 			hdev->misc_vector.vector_irq);
3290 	}
3291 
3292 	return ret;
3293 }
3294 
3295 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3296 {
3297 	free_irq(hdev->misc_vector.vector_irq, hdev);
3298 	hclge_free_vector(hdev, 0);
3299 }
3300 
3301 int hclge_notify_client(struct hclge_dev *hdev,
3302 			enum hnae3_reset_notify_type type)
3303 {
3304 	struct hnae3_client *client = hdev->nic_client;
3305 	u16 i;
3306 
3307 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3308 		return 0;
3309 
3310 	if (!client->ops->reset_notify)
3311 		return -EOPNOTSUPP;
3312 
3313 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3314 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3315 		int ret;
3316 
3317 		ret = client->ops->reset_notify(handle, type);
3318 		if (ret) {
3319 			dev_err(&hdev->pdev->dev,
3320 				"notify nic client failed %d(%d)\n", type, ret);
3321 			return ret;
3322 		}
3323 	}
3324 
3325 	return 0;
3326 }
3327 
3328 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3329 				    enum hnae3_reset_notify_type type)
3330 {
3331 	struct hnae3_client *client = hdev->roce_client;
3332 	int ret;
3333 	u16 i;
3334 
3335 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3336 		return 0;
3337 
3338 	if (!client->ops->reset_notify)
3339 		return -EOPNOTSUPP;
3340 
3341 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3342 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3343 
3344 		ret = client->ops->reset_notify(handle, type);
3345 		if (ret) {
3346 			dev_err(&hdev->pdev->dev,
3347 				"notify roce client failed %d(%d)",
3348 				type, ret);
3349 			return ret;
3350 		}
3351 	}
3352 
3353 	return ret;
3354 }
3355 
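/* Poll the reset status register that matches the current reset type until
 * the hardware clears the reset pending bit, giving up after
 * HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WATI_MS each (about 35 seconds).
 */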
3356 static int hclge_reset_wait(struct hclge_dev *hdev)
3357 {
3358 #define HCLGE_RESET_WATI_MS	100
3359 #define HCLGE_RESET_WAIT_CNT	350
3360 
3361 	u32 val, reg, reg_bit;
3362 	u32 cnt = 0;
3363 
3364 	switch (hdev->reset_type) {
3365 	case HNAE3_IMP_RESET:
3366 		reg = HCLGE_GLOBAL_RESET_REG;
3367 		reg_bit = HCLGE_IMP_RESET_BIT;
3368 		break;
3369 	case HNAE3_GLOBAL_RESET:
3370 		reg = HCLGE_GLOBAL_RESET_REG;
3371 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3372 		break;
3373 	case HNAE3_FUNC_RESET:
3374 		reg = HCLGE_FUN_RST_ING;
3375 		reg_bit = HCLGE_FUN_RST_ING_B;
3376 		break;
3377 	default:
3378 		dev_err(&hdev->pdev->dev,
3379 			"Wait for unsupported reset type: %d\n",
3380 			hdev->reset_type);
3381 		return -EINVAL;
3382 	}
3383 
3384 	val = hclge_read_dev(&hdev->hw, reg);
3385 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3386 		msleep(HCLGE_RESET_WATI_MS);
3387 		val = hclge_read_dev(&hdev->hw, reg);
3388 		cnt++;
3389 	}
3390 
3391 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3392 		dev_warn(&hdev->pdev->dev,
3393 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3394 		return -EBUSY;
3395 	}
3396 
3397 	return 0;
3398 }
3399 
3400 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3401 {
3402 	struct hclge_vf_rst_cmd *req;
3403 	struct hclge_desc desc;
3404 
3405 	req = (struct hclge_vf_rst_cmd *)desc.data;
3406 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3407 	req->dest_vfid = func_id;
3408 
3409 	if (reset)
3410 		req->vf_rst = 0x1;
3411 
3412 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3413 }
3414 
3415 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3416 {
3417 	int i;
3418 
3419 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3420 		struct hclge_vport *vport = &hdev->vport[i];
3421 		int ret;
3422 
3423 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3424 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3425 		if (ret) {
3426 			dev_err(&hdev->pdev->dev,
3427 				"set vf(%u) rst failed %d!\n",
3428 				vport->vport_id, ret);
3429 			return ret;
3430 		}
3431 
3432 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3433 			continue;
3434 
3435 		/* Inform VF to process the reset.
3436 		 * hclge_inform_reset_assert_to_vf may fail if VF
3437 		 * driver is not loaded.
3438 		 */
3439 		ret = hclge_inform_reset_assert_to_vf(vport);
3440 		if (ret)
3441 			dev_warn(&hdev->pdev->dev,
3442 				 "inform reset to vf(%u) failed %d!\n",
3443 				 vport->vport_id, ret);
3444 	}
3445 
3446 	return 0;
3447 }
3448 
3449 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3450 {
3451 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3452 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3453 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3454 		return;
3455 
3456 	hclge_mbx_handler(hdev);
3457 
3458 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3459 }
3460 
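/* Before asserting a PF or FLR reset, wait until firmware reports that all
 * VFs are ready (have stopped IO). The mailbox is serviced between polls so
 * VFs can bring their netdevs down; old firmware that lacks the query
 * command gets a fixed 100 ms grace period instead.
 */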
3461 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3462 {
3463 	struct hclge_pf_rst_sync_cmd *req;
3464 	struct hclge_desc desc;
3465 	int cnt = 0;
3466 	int ret;
3467 
3468 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3469 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3470 
3471 	do {
3472 		/* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3473 		hclge_mailbox_service_task(hdev);
3474 
3475 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3476 		/* to be compatible with old firmware, wait
3477 		 * 100 ms for the VF to stop IO
3478 		 */
3479 		if (ret == -EOPNOTSUPP) {
3480 			msleep(HCLGE_RESET_SYNC_TIME);
3481 			return;
3482 		} else if (ret) {
3483 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3484 				 ret);
3485 			return;
3486 		} else if (req->all_vf_ready) {
3487 			return;
3488 		}
3489 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3490 		hclge_cmd_reuse_desc(&desc, true);
3491 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3492 
3493 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3494 }
3495 
3496 void hclge_report_hw_error(struct hclge_dev *hdev,
3497 			   enum hnae3_hw_error_type type)
3498 {
3499 	struct hnae3_client *client = hdev->nic_client;
3500 	u16 i;
3501 
3502 	if (!client || !client->ops->process_hw_error ||
3503 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3504 		return;
3505 
3506 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3507 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3508 }
3509 
3510 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3511 {
3512 	u32 reg_val;
3513 
3514 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3515 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3516 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3517 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3518 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3519 	}
3520 
3521 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3522 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3523 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3524 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3525 	}
3526 }
3527 
3528 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3529 {
3530 	struct hclge_desc desc;
3531 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3532 	int ret;
3533 
3534 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3535 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3536 	req->fun_reset_vfid = func_id;
3537 
3538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3539 	if (ret)
3540 		dev_err(&hdev->pdev->dev,
3541 			"send function reset cmd fail, status =%d\n", ret);
3542 
3543 	return ret;
3544 }
3545 
3546 static void hclge_do_reset(struct hclge_dev *hdev)
3547 {
3548 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3549 	struct pci_dev *pdev = hdev->pdev;
3550 	u32 val;
3551 
3552 	if (hclge_get_hw_reset_stat(handle)) {
3553 		dev_info(&pdev->dev, "hardware reset not finished\n");
3554 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3555 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3556 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3557 		return;
3558 	}
3559 
3560 	switch (hdev->reset_type) {
3561 	case HNAE3_GLOBAL_RESET:
3562 		dev_info(&pdev->dev, "global reset requested\n");
3563 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3564 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3565 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3566 		break;
3567 	case HNAE3_FUNC_RESET:
3568 		dev_info(&pdev->dev, "PF reset requested\n");
3569 		/* schedule again to check later */
3570 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3571 		hclge_reset_task_schedule(hdev);
3572 		break;
3573 	default:
3574 		dev_warn(&pdev->dev,
3575 			 "unsupported reset type: %d\n", hdev->reset_type);
3576 		break;
3577 	}
3578 }
3579 
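/* Convert the pending reset bits in @addr into the single highest priority
 * reset level (IMP > global > func > FLR), clearing that bit and any lower
 * level bits the chosen reset also covers. An unknown reset is first
 * resolved by the MSI-X error handler. Returns HNAE3_NONE_RESET if a higher
 * level reset is already being handled.
 */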
3580 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3581 						   unsigned long *addr)
3582 {
3583 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3584 	struct hclge_dev *hdev = ae_dev->priv;
3585 
3586 	/* first, resolve any unknown reset type to the known type(s) */
3587 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3588 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3589 					HCLGE_MISC_VECTOR_INT_STS);
3590 		/* we will intentionally ignore any errors from this function
3591 		 *  as we will end up in *some* reset request in any case
3592 		 */
3593 		if (hclge_handle_hw_msix_error(hdev, addr))
3594 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3595 				 msix_sts_reg);
3596 
3597 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3598 		/* We deferred the clearing of the error event which caused
3599 		 * the interrupt, since it was not possible to do that in
3600 		 * interrupt context (and this is the reason we introduced
3601 		 * the new UNKNOWN reset type). Now that the errors have been
3602 		 * handled and cleared in hardware, we can safely enable
3603 		 * interrupts. This is an exception to the norm.
3604 		 */
3605 		hclge_enable_vector(&hdev->misc_vector, true);
3606 	}
3607 
3608 	/* return the highest priority reset level amongst all */
3609 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3610 		rst_level = HNAE3_IMP_RESET;
3611 		clear_bit(HNAE3_IMP_RESET, addr);
3612 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3613 		clear_bit(HNAE3_FUNC_RESET, addr);
3614 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3615 		rst_level = HNAE3_GLOBAL_RESET;
3616 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3617 		clear_bit(HNAE3_FUNC_RESET, addr);
3618 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3619 		rst_level = HNAE3_FUNC_RESET;
3620 		clear_bit(HNAE3_FUNC_RESET, addr);
3621 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3622 		rst_level = HNAE3_FLR_RESET;
3623 		clear_bit(HNAE3_FLR_RESET, addr);
3624 	}
3625 
3626 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3627 	    rst_level < hdev->reset_type)
3628 		return HNAE3_NONE_RESET;
3629 
3630 	return rst_level;
3631 }
3632 
3633 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3634 {
3635 	u32 clearval = 0;
3636 
3637 	switch (hdev->reset_type) {
3638 	case HNAE3_IMP_RESET:
3639 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3640 		break;
3641 	case HNAE3_GLOBAL_RESET:
3642 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3643 		break;
3644 	default:
3645 		break;
3646 	}
3647 
3648 	if (!clearval)
3649 		return;
3650 
3651 	/* For revision 0x20, the reset interrupt source
3652 	 * can only be cleared after the hardware reset is done
3653 	 */
3654 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3655 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3656 				clearval);
3657 
3658 	hclge_enable_vector(&hdev->misc_vector, true);
3659 }
3660 
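/* Set or clear the software reset-ready bit in the NIC CSQ depth register,
 * which tells the firmware whether the driver has finished its preparatory
 * work and is ready for the hardware reset to proceed.
 */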
3661 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3662 {
3663 	u32 reg_val;
3664 
3665 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3666 	if (enable)
3667 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3668 	else
3669 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3670 
3671 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3672 }
3673 
3674 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3675 {
3676 	int ret;
3677 
3678 	ret = hclge_set_all_vf_rst(hdev, true);
3679 	if (ret)
3680 		return ret;
3681 
3682 	hclge_func_reset_sync_vf(hdev);
3683 
3684 	return 0;
3685 }
3686 
3687 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3688 {
3689 	u32 reg_val;
3690 	int ret = 0;
3691 
3692 	switch (hdev->reset_type) {
3693 	case HNAE3_FUNC_RESET:
3694 		ret = hclge_func_reset_notify_vf(hdev);
3695 		if (ret)
3696 			return ret;
3697 
3698 		ret = hclge_func_reset_cmd(hdev, 0);
3699 		if (ret) {
3700 			dev_err(&hdev->pdev->dev,
3701 				"asserting function reset fail %d!\n", ret);
3702 			return ret;
3703 		}
3704 
3705 		/* After performing a PF reset, it is not necessary to do the
3706 		 * mailbox handling or send any command to firmware, because
3707 		 * any mailbox handling or command to firmware is only valid
3708 		 * after hclge_cmd_init is called.
3709 		 */
3710 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3711 		hdev->rst_stats.pf_rst_cnt++;
3712 		break;
3713 	case HNAE3_FLR_RESET:
3714 		ret = hclge_func_reset_notify_vf(hdev);
3715 		if (ret)
3716 			return ret;
3717 		break;
3718 	case HNAE3_IMP_RESET:
3719 		hclge_handle_imp_error(hdev);
3720 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3722 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3723 		break;
3724 	default:
3725 		break;
3726 	}
3727 
3728 	/* inform hardware that preparatory work is done */
3729 	msleep(HCLGE_RESET_SYNC_TIME);
3730 	hclge_reset_handshake(hdev, true);
3731 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3732 
3733 	return ret;
3734 }
3735 
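/* Handle a failed reset attempt. Returns true if the reset task should be
 * rescheduled (another reset is still pending, or the failure count is
 * below MAX_RESET_FAIL_CNT); returns false when a new reset interrupt
 * supersedes this one or the failure is treated as final.
 */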
3736 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3737 {
3738 #define MAX_RESET_FAIL_CNT 5
3739 
3740 	if (hdev->reset_pending) {
3741 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3742 			 hdev->reset_pending);
3743 		return true;
3744 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3745 		   HCLGE_RESET_INT_M) {
3746 		dev_info(&hdev->pdev->dev,
3747 			 "reset failed because new reset interrupt\n");
3748 		hclge_clear_reset_cause(hdev);
3749 		return false;
3750 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3751 		hdev->rst_stats.reset_fail_cnt++;
3752 		set_bit(hdev->reset_type, &hdev->reset_pending);
3753 		dev_info(&hdev->pdev->dev,
3754 			 "re-schedule reset task(%u)\n",
3755 			 hdev->rst_stats.reset_fail_cnt);
3756 		return true;
3757 	}
3758 
3759 	hclge_clear_reset_cause(hdev);
3760 
3761 	/* recover the handshake status when reset fail */
3762 	hclge_reset_handshake(hdev, true);
3763 
3764 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3765 
3766 	hclge_dbg_dump_rst_info(hdev);
3767 
3768 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3769 
3770 	return false;
3771 }
3772 
3773 static int hclge_set_rst_done(struct hclge_dev *hdev)
3774 {
3775 	struct hclge_pf_rst_done_cmd *req;
3776 	struct hclge_desc desc;
3777 	int ret;
3778 
3779 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3781 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3782 
3783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3784 	/* To be compatible with the old firmware, which does not support
3785 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3786 	 * return success
3787 	 */
3788 	if (ret == -EOPNOTSUPP) {
3789 		dev_warn(&hdev->pdev->dev,
3790 			 "current firmware does not support command(0x%x)!\n",
3791 			 HCLGE_OPC_PF_RST_DONE);
3792 		return 0;
3793 	} else if (ret) {
3794 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3795 			ret);
3796 	}
3797 
3798 	return ret;
3799 }
3800 
3801 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3802 {
3803 	int ret = 0;
3804 
3805 	switch (hdev->reset_type) {
3806 	case HNAE3_FUNC_RESET:
3807 	case HNAE3_FLR_RESET:
3808 		ret = hclge_set_all_vf_rst(hdev, false);
3809 		break;
3810 	case HNAE3_GLOBAL_RESET:
3811 	case HNAE3_IMP_RESET:
3812 		ret = hclge_set_rst_done(hdev);
3813 		break;
3814 	default:
3815 		break;
3816 	}
3817 
3818 	/* clear up the handshake status after re-initialize done */
3819 	hclge_reset_handshake(hdev, false);
3820 
3821 	return ret;
3822 }
3823 
3824 static int hclge_reset_stack(struct hclge_dev *hdev)
3825 {
3826 	int ret;
3827 
3828 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3829 	if (ret)
3830 		return ret;
3831 
3832 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3833 	if (ret)
3834 		return ret;
3835 
3836 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3837 }
3838 
3839 static int hclge_reset_prepare(struct hclge_dev *hdev)
3840 {
3841 	int ret;
3842 
3843 	hdev->rst_stats.reset_cnt++;
3844 	/* perform reset of the stack & ae device for a client */
3845 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3846 	if (ret)
3847 		return ret;
3848 
3849 	rtnl_lock();
3850 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3851 	rtnl_unlock();
3852 	if (ret)
3853 		return ret;
3854 
3855 	return hclge_reset_prepare_wait(hdev);
3856 }
3857 
3858 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3859 {
3860 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3861 	enum hnae3_reset_type reset_level;
3862 	int ret;
3863 
3864 	hdev->rst_stats.hw_reset_done_cnt++;
3865 
3866 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3867 	if (ret)
3868 		return ret;
3869 
3870 	rtnl_lock();
3871 	ret = hclge_reset_stack(hdev);
3872 	rtnl_unlock();
3873 	if (ret)
3874 		return ret;
3875 
3876 	hclge_clear_reset_cause(hdev);
3877 
3878 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3879 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3880 	 * times
3881 	 */
3882 	if (ret &&
3883 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3884 		return ret;
3885 
3886 	ret = hclge_reset_prepare_up(hdev);
3887 	if (ret)
3888 		return ret;
3889 
3890 	rtnl_lock();
3891 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3892 	rtnl_unlock();
3893 	if (ret)
3894 		return ret;
3895 
3896 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3897 	if (ret)
3898 		return ret;
3899 
3900 	hdev->last_reset_time = jiffies;
3901 	hdev->rst_stats.reset_fail_cnt = 0;
3902 	hdev->rst_stats.reset_done_cnt++;
3903 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3904 
3905 	/* if default_reset_request has a higher-level reset request,
3906 	 * it should be handled as soon as possible, since some errors
3907 	 * need this kind of reset to be fixed.
3908 	 */
3909 	reset_level = hclge_get_reset_level(ae_dev,
3910 					    &hdev->default_reset_request);
3911 	if (reset_level != HNAE3_NONE_RESET)
3912 		set_bit(reset_level, &hdev->reset_request);
3913 
3914 	return 0;
3915 }
3916 
3917 static void hclge_reset(struct hclge_dev *hdev)
3918 {
3919 	if (hclge_reset_prepare(hdev))
3920 		goto err_reset;
3921 
3922 	if (hclge_reset_wait(hdev))
3923 		goto err_reset;
3924 
3925 	if (hclge_reset_rebuild(hdev))
3926 		goto err_reset;
3927 
3928 	return;
3929 
3930 err_reset:
3931 	if (hclge_reset_err_handle(hdev))
3932 		hclge_reset_task_schedule(hdev);
3933 }
3934 
3935 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3936 {
3937 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3938 	struct hclge_dev *hdev = ae_dev->priv;
3939 
3940 	/* We might end up getting called broadly because of the 2 cases below:
3941 	 * 1. A recoverable error was conveyed through APEI and the only way to
3942 	 *    restore normalcy is to reset.
3943 	 * 2. A new reset request from the stack due to timeout
3944 	 *
3945 	 * For the first case, the error event might not have an ae handle
3946 	 * available. Check if this is a new reset request and we are not here
3947 	 * just because the last reset attempt did not succeed and the watchdog
3948 	 * hit us again. We will know this if the last reset request did not
3949 	 * occur very recently (watchdog timer = 5*HZ, so check after a
3950 	 * sufficiently large time, say 4*5*HZ). For a new request we reset
3951 	 * the "reset level" to PF reset. And if it is a repeat of the most
3952 	 * recent reset request, we want to make sure we throttle it, so we
3953 	 * will not allow it again within 3*HZ.
3954 	 */
3955 	if (!handle)
3956 		handle = &hdev->vport[0].nic;
3957 
3958 	if (time_before(jiffies, (hdev->last_reset_time +
3959 				  HCLGE_RESET_INTERVAL))) {
3960 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3961 		return;
3962 	} else if (hdev->default_reset_request) {
3963 		hdev->reset_level =
3964 			hclge_get_reset_level(ae_dev,
3965 					      &hdev->default_reset_request);
3966 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3967 		hdev->reset_level = HNAE3_FUNC_RESET;
3968 	}
3969 
3970 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3971 		 hdev->reset_level);
3972 
3973 	/* request reset & schedule reset task */
3974 	set_bit(hdev->reset_level, &hdev->reset_request);
3975 	hclge_reset_task_schedule(hdev);
3976 
3977 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3978 		hdev->reset_level++;
3979 }
3980 
3981 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3982 					enum hnae3_reset_type rst_type)
3983 {
3984 	struct hclge_dev *hdev = ae_dev->priv;
3985 
3986 	set_bit(rst_type, &hdev->default_reset_request);
3987 }
3988 
3989 static void hclge_reset_timer(struct timer_list *t)
3990 {
3991 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3992 
3993 	/* if default_reset_request has no value, it means that this reset
3994 	 * request has already been handled, so just return here
3995 	 */
3996 	if (!hdev->default_reset_request)
3997 		return;
3998 
3999 	dev_info(&hdev->pdev->dev,
4000 		 "triggering reset in reset timer\n");
4001 	hclge_reset_event(hdev->pdev, NULL);
4002 }
4003 
4004 static void hclge_reset_subtask(struct hclge_dev *hdev)
4005 {
4006 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4007 
4008 	/* check if there is any ongoing reset in the hardware. This status can
4009 	 * be checked from reset_pending. If there is, we need to wait for the
4010 	 * hardware to complete the reset.
4011 	 *    a. If we are able to figure out in reasonable time that the
4012 	 *       hardware has fully reset, we can proceed with the driver and
4013 	 *       client reset.
4014 	 *    b. else, we can come back later to check this status, so
4015 	 *       re-schedule now.
4016 	 */
4017 	hdev->last_reset_time = jiffies;
4018 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4019 	if (hdev->reset_type != HNAE3_NONE_RESET)
4020 		hclge_reset(hdev);
4021 
4022 	/* check if we got any *new* reset requests to be honored */
4023 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4024 	if (hdev->reset_type != HNAE3_NONE_RESET)
4025 		hclge_do_reset(hdev);
4026 
4027 	hdev->reset_type = HNAE3_NONE_RESET;
4028 }
4029 
4030 static void hclge_reset_service_task(struct hclge_dev *hdev)
4031 {
4032 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4033 		return;
4034 
4035 	down(&hdev->reset_sem);
4036 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4037 
4038 	hclge_reset_subtask(hdev);
4039 
4040 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4041 	up(&hdev->reset_sem);
4042 }
4043 
4044 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4045 {
4046 	int i;
4047 
4048 	/* start from vport 1 since the PF is always alive */
4049 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4050 		struct hclge_vport *vport = &hdev->vport[i];
4051 
4052 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4053 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4054 
4055 		/* If the VF is not alive, set its MPS to the default value */
4056 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4057 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4058 	}
4059 }
4060 
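/* Periodic service task: link status, MAC table and promisc mode are synced
 * on every pass, while the heavier work (vport aliveness, statistics, port
 * info, VLAN and aRFS housekeeping) runs at most once per second.
 */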
4061 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4062 {
4063 	unsigned long delta = round_jiffies_relative(HZ);
4064 
4065 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4066 		return;
4067 
4068 	/* Always handle the link updating to make sure link state is
4069 	 * updated when it is triggered by mbx.
4070 	 */
4071 	hclge_update_link_status(hdev);
4072 	hclge_sync_mac_table(hdev);
4073 	hclge_sync_promisc_mode(hdev);
4074 
4075 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4076 		delta = jiffies - hdev->last_serv_processed;
4077 
4078 		if (delta < round_jiffies_relative(HZ)) {
4079 			delta = round_jiffies_relative(HZ) - delta;
4080 			goto out;
4081 		}
4082 	}
4083 
4084 	hdev->serv_processed_cnt++;
4085 	hclge_update_vport_alive(hdev);
4086 
4087 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4088 		hdev->last_serv_processed = jiffies;
4089 		goto out;
4090 	}
4091 
4092 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4093 		hclge_update_stats_for_all(hdev);
4094 
4095 	hclge_update_port_info(hdev);
4096 	hclge_sync_vlan_filter(hdev);
4097 
4098 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4099 		hclge_rfs_filter_expire(hdev);
4100 
4101 	hdev->last_serv_processed = jiffies;
4102 
4103 out:
4104 	hclge_task_schedule(hdev, delta);
4105 }
4106 
4107 static void hclge_service_task(struct work_struct *work)
4108 {
4109 	struct hclge_dev *hdev =
4110 		container_of(work, struct hclge_dev, service_task.work);
4111 
4112 	hclge_reset_service_task(hdev);
4113 	hclge_mailbox_service_task(hdev);
4114 	hclge_periodic_service_task(hdev);
4115 
4116 	/* Handle reset and mbx again in case the periodic task delays the
4117 	 * handling by calling hclge_task_schedule() in
4118 	 * hclge_periodic_service_task().
4119 	 */
4120 	hclge_reset_service_task(hdev);
4121 	hclge_mailbox_service_task(hdev);
4122 }
4123 
4124 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4125 {
4126 	/* VF handle has no client */
4127 	if (!handle->client)
4128 		return container_of(handle, struct hclge_vport, nic);
4129 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4130 		return container_of(handle, struct hclge_vport, roce);
4131 	else
4132 		return container_of(handle, struct hclge_vport, nic);
4133 }
4134 
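/* Allocate up to @vector_num MSI-X vectors for this vport, skipping vector 0
 * which is reserved for the misc interrupt. Fills @vector_info with the irq
 * number and per-vport interrupt register address of each vector and returns
 * the number actually allocated.
 */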
4135 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4136 			    struct hnae3_vector_info *vector_info)
4137 {
4138 	struct hclge_vport *vport = hclge_get_vport(handle);
4139 	struct hnae3_vector_info *vector = vector_info;
4140 	struct hclge_dev *hdev = vport->back;
4141 	int alloc = 0;
4142 	int i, j;
4143 
4144 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4145 	vector_num = min(hdev->num_msi_left, vector_num);
4146 
4147 	for (j = 0; j < vector_num; j++) {
4148 		for (i = 1; i < hdev->num_msi; i++) {
4149 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4150 				vector->vector = pci_irq_vector(hdev->pdev, i);
4151 				vector->io_addr = hdev->hw.io_base +
4152 					HCLGE_VECTOR_REG_BASE +
4153 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4154 					vport->vport_id *
4155 					HCLGE_VECTOR_VF_OFFSET;
4156 				hdev->vector_status[i] = vport->vport_id;
4157 				hdev->vector_irq[i] = vector->vector;
4158 
4159 				vector++;
4160 				alloc++;
4161 
4162 				break;
4163 			}
4164 		}
4165 	}
4166 	hdev->num_msi_left -= alloc;
4167 	hdev->num_msi_used += alloc;
4168 
4169 	return alloc;
4170 }
4171 
4172 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4173 {
4174 	int i;
4175 
4176 	for (i = 0; i < hdev->num_msi; i++)
4177 		if (vector == hdev->vector_irq[i])
4178 			return i;
4179 
4180 	return -EINVAL;
4181 }
4182 
4183 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4184 {
4185 	struct hclge_vport *vport = hclge_get_vport(handle);
4186 	struct hclge_dev *hdev = vport->back;
4187 	int vector_id;
4188 
4189 	vector_id = hclge_get_vector_index(hdev, vector);
4190 	if (vector_id < 0) {
4191 		dev_err(&hdev->pdev->dev,
4192 			"Get vector index fail. vector = %d\n", vector);
4193 		return vector_id;
4194 	}
4195 
4196 	hclge_free_vector(hdev, vector_id);
4197 
4198 	return 0;
4199 }
4200 
4201 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4202 {
4203 	return HCLGE_RSS_KEY_SIZE;
4204 }
4205 
4206 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4207 {
4208 	return HCLGE_RSS_IND_TBL_SIZE;
4209 }
4210 
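/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor
 * per chunk, with the chunk offset carried in the hash_config field.
 */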
4211 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4212 				  const u8 hfunc, const u8 *key)
4213 {
4214 	struct hclge_rss_config_cmd *req;
4215 	unsigned int key_offset = 0;
4216 	struct hclge_desc desc;
4217 	int key_counts;
4218 	int key_size;
4219 	int ret;
4220 
4221 	key_counts = HCLGE_RSS_KEY_SIZE;
4222 	req = (struct hclge_rss_config_cmd *)desc.data;
4223 
4224 	while (key_counts) {
4225 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4226 					   false);
4227 
4228 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4229 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4230 
4231 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4232 		memcpy(req->hash_key,
4233 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4234 
4235 		key_counts -= key_size;
4236 		key_offset++;
4237 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4238 		if (ret) {
4239 			dev_err(&hdev->pdev->dev,
4240 				"Configure RSS config fail, status = %d\n",
4241 				ret);
4242 			return ret;
4243 		}
4244 	}
4245 	return 0;
4246 }
4247 
4248 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4249 {
4250 	struct hclge_rss_indirection_table_cmd *req;
4251 	struct hclge_desc desc;
4252 	int i, j;
4253 	int ret;
4254 
4255 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4256 
4257 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4258 		hclge_cmd_setup_basic_desc
4259 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4260 
4261 		req->start_table_index =
4262 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4263 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4264 
4265 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4266 			req->rss_result[j] =
4267 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4268 
4269 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4270 		if (ret) {
4271 			dev_err(&hdev->pdev->dev,
4272 				"Configure rss indir table fail, status = %d\n",
4273 				ret);
4274 			return ret;
4275 		}
4276 	}
4277 	return 0;
4278 }
4279 
4280 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4281 				 u16 *tc_size, u16 *tc_offset)
4282 {
4283 	struct hclge_rss_tc_mode_cmd *req;
4284 	struct hclge_desc desc;
4285 	int ret;
4286 	int i;
4287 
4288 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4289 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4290 
4291 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4292 		u16 mode = 0;
4293 
4294 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4295 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4296 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4297 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4298 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4299 
4300 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4301 	}
4302 
4303 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4304 	if (ret)
4305 		dev_err(&hdev->pdev->dev,
4306 			"Configure rss tc mode fail, status = %d\n", ret);
4307 
4308 	return ret;
4309 }
4310 
4311 static void hclge_get_rss_type(struct hclge_vport *vport)
4312 {
4313 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4314 	    vport->rss_tuple_sets.ipv4_udp_en ||
4315 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4316 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4317 	    vport->rss_tuple_sets.ipv6_udp_en ||
4318 	    vport->rss_tuple_sets.ipv6_sctp_en)
4319 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4320 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4321 		 vport->rss_tuple_sets.ipv6_fragment_en)
4322 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4323 	else
4324 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4325 }
4326 
4327 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4328 {
4329 	struct hclge_rss_input_tuple_cmd *req;
4330 	struct hclge_desc desc;
4331 	int ret;
4332 
4333 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4334 
4335 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4336 
4337 	/* Get the tuple cfg from pf */
4338 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4339 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4340 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4341 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4342 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4343 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4344 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4345 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4346 	hclge_get_rss_type(&hdev->vport[0]);
4347 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4348 	if (ret)
4349 		dev_err(&hdev->pdev->dev,
4350 			"Configure rss input fail, status = %d\n", ret);
4351 	return ret;
4352 }
4353 
4354 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4355 			 u8 *key, u8 *hfunc)
4356 {
4357 	struct hclge_vport *vport = hclge_get_vport(handle);
4358 	int i;
4359 
4360 	/* Get hash algorithm */
4361 	if (hfunc) {
4362 		switch (vport->rss_algo) {
4363 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4364 			*hfunc = ETH_RSS_HASH_TOP;
4365 			break;
4366 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4367 			*hfunc = ETH_RSS_HASH_XOR;
4368 			break;
4369 		default:
4370 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4371 			break;
4372 		}
4373 	}
4374 
4375 	/* Get the RSS Key required by the user */
4376 	if (key)
4377 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4378 
4379 	/* Get indirect table */
4380 	if (indir)
4381 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4382 			indir[i] =  vport->rss_indirection_tbl[i];
4383 
4384 	return 0;
4385 }
4386 
4387 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4388 				 u8 *hash_algo)
4389 {
4390 	switch (hfunc) {
4391 	case ETH_RSS_HASH_TOP:
4392 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4393 		return 0;
4394 	case ETH_RSS_HASH_XOR:
4395 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4396 		return 0;
4397 	case ETH_RSS_HASH_NO_CHANGE:
4398 		*hash_algo = vport->rss_algo;
4399 		return 0;
4400 	default:
4401 		return -EINVAL;
4402 	}
4403 }
4404 
4405 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4406 			 const  u8 *key, const  u8 hfunc)
4407 {
4408 	struct hclge_vport *vport = hclge_get_vport(handle);
4409 	struct hclge_dev *hdev = vport->back;
4410 	u8 hash_algo;
4411 	int ret, i;
4412 
4413 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4414 	if (ret) {
4415 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4416 		return ret;
4417 	}
4418 
4419 	/* Set the RSS Hash Key if specified by the user */
4420 	if (key) {
4421 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4422 		if (ret)
4423 			return ret;
4424 
4425 		/* Update the shadow RSS key with the user specified key */
4426 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4427 	} else {
4428 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4429 					     vport->rss_hash_key);
4430 		if (ret)
4431 			return ret;
4432 	}
4433 	vport->rss_algo = hash_algo;
4434 
4435 	/* Update the shadow RSS table with user specified qids */
4436 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4437 		vport->rss_indirection_tbl[i] = indir[i];
4438 
4439 	/* Update the hardware */
4440 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4441 }
4442 
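/* Translate the ethtool RXH_* flags in @nfc into the hardware RSS tuple
 * bits (source/destination IP and L4 port); SCTP flows additionally set
 * HCLGE_V_TAG_BIT.
 */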
4443 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4444 {
4445 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4446 
4447 	if (nfc->data & RXH_L4_B_2_3)
4448 		hash_sets |= HCLGE_D_PORT_BIT;
4449 	else
4450 		hash_sets &= ~HCLGE_D_PORT_BIT;
4451 
4452 	if (nfc->data & RXH_IP_SRC)
4453 		hash_sets |= HCLGE_S_IP_BIT;
4454 	else
4455 		hash_sets &= ~HCLGE_S_IP_BIT;
4456 
4457 	if (nfc->data & RXH_IP_DST)
4458 		hash_sets |= HCLGE_D_IP_BIT;
4459 	else
4460 		hash_sets &= ~HCLGE_D_IP_BIT;
4461 
4462 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4463 		hash_sets |= HCLGE_V_TAG_BIT;
4464 
4465 	return hash_sets;
4466 }
4467 
4468 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4469 			       struct ethtool_rxnfc *nfc)
4470 {
4471 	struct hclge_vport *vport = hclge_get_vport(handle);
4472 	struct hclge_dev *hdev = vport->back;
4473 	struct hclge_rss_input_tuple_cmd *req;
4474 	struct hclge_desc desc;
4475 	u8 tuple_sets;
4476 	int ret;
4477 
4478 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4479 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4480 		return -EINVAL;
4481 
4482 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4484 
4485 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4486 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4487 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4488 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4489 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4490 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4491 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4492 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4493 
4494 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4495 	switch (nfc->flow_type) {
4496 	case TCP_V4_FLOW:
4497 		req->ipv4_tcp_en = tuple_sets;
4498 		break;
4499 	case TCP_V6_FLOW:
4500 		req->ipv6_tcp_en = tuple_sets;
4501 		break;
4502 	case UDP_V4_FLOW:
4503 		req->ipv4_udp_en = tuple_sets;
4504 		break;
4505 	case UDP_V6_FLOW:
4506 		req->ipv6_udp_en = tuple_sets;
4507 		break;
4508 	case SCTP_V4_FLOW:
4509 		req->ipv4_sctp_en = tuple_sets;
4510 		break;
4511 	case SCTP_V6_FLOW:
4512 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4513 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4514 			return -EINVAL;
4515 
4516 		req->ipv6_sctp_en = tuple_sets;
4517 		break;
4518 	case IPV4_FLOW:
4519 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4520 		break;
4521 	case IPV6_FLOW:
4522 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4523 		break;
4524 	default:
4525 		return -EINVAL;
4526 	}
4527 
4528 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4529 	if (ret) {
4530 		dev_err(&hdev->pdev->dev,
4531 			"Set rss tuple fail, status = %d\n", ret);
4532 		return ret;
4533 	}
4534 
4535 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4536 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4537 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4538 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4539 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4540 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4541 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4542 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4543 	hclge_get_rss_type(vport);
4544 	return 0;
4545 }
4546 
4547 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4548 			       struct ethtool_rxnfc *nfc)
4549 {
4550 	struct hclge_vport *vport = hclge_get_vport(handle);
4551 	u8 tuple_sets;
4552 
4553 	nfc->data = 0;
4554 
4555 	switch (nfc->flow_type) {
4556 	case TCP_V4_FLOW:
4557 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4558 		break;
4559 	case UDP_V4_FLOW:
4560 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4561 		break;
4562 	case TCP_V6_FLOW:
4563 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4564 		break;
4565 	case UDP_V6_FLOW:
4566 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4567 		break;
4568 	case SCTP_V4_FLOW:
4569 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4570 		break;
4571 	case SCTP_V6_FLOW:
4572 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4573 		break;
4574 	case IPV4_FLOW:
4575 	case IPV6_FLOW:
4576 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4577 		break;
4578 	default:
4579 		return -EINVAL;
4580 	}
4581 
4582 	if (!tuple_sets)
4583 		return 0;
4584 
4585 	if (tuple_sets & HCLGE_D_PORT_BIT)
4586 		nfc->data |= RXH_L4_B_2_3;
4587 	if (tuple_sets & HCLGE_S_PORT_BIT)
4588 		nfc->data |= RXH_L4_B_0_1;
4589 	if (tuple_sets & HCLGE_D_IP_BIT)
4590 		nfc->data |= RXH_IP_DST;
4591 	if (tuple_sets & HCLGE_S_IP_BIT)
4592 		nfc->data |= RXH_IP_SRC;
4593 
4594 	return 0;
4595 }
4596 
4597 static int hclge_get_tc_size(struct hnae3_handle *handle)
4598 {
4599 	struct hclge_vport *vport = hclge_get_vport(handle);
4600 	struct hclge_dev *hdev = vport->back;
4601 
4602 	return hdev->rss_size_max;
4603 }
4604 
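/* Write the PF's current RSS state (indirection table, hash key and
 * algorithm, input tuples, and per-TC size/offset) to hardware. The per-TC
 * size programmed is log2 of rss_size rounded up to a power of two.
 */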
4605 int hclge_rss_init_hw(struct hclge_dev *hdev)
4606 {
4607 	struct hclge_vport *vport = hdev->vport;
4608 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4609 	u16 rss_size = vport[0].alloc_rss_size;
4610 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4611 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4612 	u8 *key = vport[0].rss_hash_key;
4613 	u8 hfunc = vport[0].rss_algo;
4614 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4615 	u16 roundup_size;
4616 	unsigned int i;
4617 	int ret;
4618 
4619 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4620 	if (ret)
4621 		return ret;
4622 
4623 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4624 	if (ret)
4625 		return ret;
4626 
4627 	ret = hclge_set_rss_input_tuple(hdev);
4628 	if (ret)
4629 		return ret;
4630 
4631 	/* Each TC has the same queue size, and the tc_size set to hardware is
4632 	 * the log2 of the roundup power of two of rss_size; the actual queue
4633 	 * size is limited by the indirection table.
4634 	 */
4635 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4636 		dev_err(&hdev->pdev->dev,
4637 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4638 			rss_size);
4639 		return -EINVAL;
4640 	}
4641 
4642 	roundup_size = roundup_pow_of_two(rss_size);
4643 	roundup_size = ilog2(roundup_size);
4644 
4645 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4646 		tc_valid[i] = 0;
4647 
4648 		if (!(hdev->hw_tc_map & BIT(i)))
4649 			continue;
4650 
4651 		tc_valid[i] = 1;
4652 		tc_size[i] = roundup_size;
4653 		tc_offset[i] = rss_size * i;
4654 	}
4655 
4656 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4657 }
4658 
4659 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4660 {
4661 	struct hclge_vport *vport = hdev->vport;
4662 	int i, j;
4663 
4664 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4665 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4666 			vport[j].rss_indirection_tbl[i] =
4667 				i % vport[j].alloc_rss_size;
4668 	}
4669 }
4670 
4671 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4672 {
4673 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4674 	struct hclge_vport *vport = hdev->vport;
4675 
4676 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4677 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4678 
4679 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4680 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4681 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4682 		vport[i].rss_tuple_sets.ipv4_udp_en =
4683 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4684 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4685 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4686 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4687 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4688 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4689 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4690 		vport[i].rss_tuple_sets.ipv6_udp_en =
4691 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4692 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4693 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4694 			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4695 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4696 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4697 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4698 
4699 		vport[i].rss_algo = rss_algo;
4700 
4701 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4702 		       HCLGE_RSS_KEY_SIZE);
4703 	}
4704 
4705 	hclge_rss_indir_init_cfg(hdev);
4706 }
4707 
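/* Map (@en == true) or unmap the TQP rings in @ring_chain to the interrupt
 * vector @vector_id. Rings are batched HCLGE_VECTOR_ELEMENTS_PER_CMD per
 * command descriptor, so long chains are sent in multiple commands.
 */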
4708 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4709 				int vector_id, bool en,
4710 				struct hnae3_ring_chain_node *ring_chain)
4711 {
4712 	struct hclge_dev *hdev = vport->back;
4713 	struct hnae3_ring_chain_node *node;
4714 	struct hclge_desc desc;
4715 	struct hclge_ctrl_vector_chain_cmd *req =
4716 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4717 	enum hclge_cmd_status status;
4718 	enum hclge_opcode_type op;
4719 	u16 tqp_type_and_id;
4720 	int i;
4721 
4722 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4723 	hclge_cmd_setup_basic_desc(&desc, op, false);
4724 	req->int_vector_id = vector_id;
4725 
4726 	i = 0;
4727 	for (node = ring_chain; node; node = node->next) {
4728 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4729 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4730 				HCLGE_INT_TYPE_S,
4731 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4732 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4733 				HCLGE_TQP_ID_S, node->tqp_index);
4734 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4735 				HCLGE_INT_GL_IDX_S,
4736 				hnae3_get_field(node->int_gl_idx,
4737 						HNAE3_RING_GL_IDX_M,
4738 						HNAE3_RING_GL_IDX_S));
4739 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4740 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4741 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4742 			req->vfid = vport->vport_id;
4743 
4744 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4745 			if (status) {
4746 				dev_err(&hdev->pdev->dev,
4747 					"Map TQP fail, status is %d.\n",
4748 					status);
4749 				return -EIO;
4750 			}
4751 			i = 0;
4752 
4753 			hclge_cmd_setup_basic_desc(&desc,
4754 						   op,
4755 						   false);
4756 			req->int_vector_id = vector_id;
4757 		}
4758 	}
4759 
4760 	if (i > 0) {
4761 		req->int_cause_num = i;
4762 		req->vfid = vport->vport_id;
4763 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4764 		if (status) {
4765 			dev_err(&hdev->pdev->dev,
4766 				"Map TQP fail, status is %d.\n", status);
4767 			return -EIO;
4768 		}
4769 	}
4770 
4771 	return 0;
4772 }
4773 
4774 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4775 				    struct hnae3_ring_chain_node *ring_chain)
4776 {
4777 	struct hclge_vport *vport = hclge_get_vport(handle);
4778 	struct hclge_dev *hdev = vport->back;
4779 	int vector_id;
4780 
4781 	vector_id = hclge_get_vector_index(hdev, vector);
4782 	if (vector_id < 0) {
4783 		dev_err(&hdev->pdev->dev,
4784 			"failed to get vector index. vector=%d\n", vector);
4785 		return vector_id;
4786 	}
4787 
4788 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4789 }
4790 
4791 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4792 				       struct hnae3_ring_chain_node *ring_chain)
4793 {
4794 	struct hclge_vport *vport = hclge_get_vport(handle);
4795 	struct hclge_dev *hdev = vport->back;
4796 	int vector_id, ret;
4797 
4798 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4799 		return 0;
4800 
4801 	vector_id = hclge_get_vector_index(hdev, vector);
4802 	if (vector_id < 0) {
4803 		dev_err(&handle->pdev->dev,
4804 			"Get vector index fail. ret =%d\n", vector_id);
4805 		return vector_id;
4806 	}
4807 
4808 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4809 	if (ret)
4810 		dev_err(&handle->pdev->dev,
4811 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4812 			vector_id, ret);
4813 
4814 	return ret;
4815 }
4816 
4817 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4818 				      struct hclge_promisc_param *param)
4819 {
4820 	struct hclge_promisc_cfg_cmd *req;
4821 	struct hclge_desc desc;
4822 	int ret;
4823 
4824 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4825 
4826 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4827 	req->vf_id = param->vf_id;
4828 
4829 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4830 	 * pdev revision(0x20); newer revisions support them. Setting these
4831 	 * two fields will not return an error when the driver sends the
4832 	 * command to the firmware on revision(0x20).
4833 	 */
4834 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4835 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4836 
4837 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4838 	if (ret)
4839 		dev_err(&hdev->pdev->dev,
4840 			"failed to set vport %d promisc mode, ret = %d.\n",
4841 			param->vf_id, ret);
4842 
4843 	return ret;
4844 }
4845 
4846 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4847 				     bool en_uc, bool en_mc, bool en_bc,
4848 				     int vport_id)
4849 {
4850 	if (!param)
4851 		return;
4852 
4853 	memset(param, 0, sizeof(struct hclge_promisc_param));
4854 	if (en_uc)
4855 		param->enable = HCLGE_PROMISC_EN_UC;
4856 	if (en_mc)
4857 		param->enable |= HCLGE_PROMISC_EN_MC;
4858 	if (en_bc)
4859 		param->enable |= HCLGE_PROMISC_EN_BC;
4860 	param->vf_id = vport_id;
4861 }
4862 
4863 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4864 				 bool en_mc_pmc, bool en_bc_pmc)
4865 {
4866 	struct hclge_dev *hdev = vport->back;
4867 	struct hclge_promisc_param param;
4868 
4869 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4870 				 vport->vport_id);
4871 	return hclge_cmd_set_promisc_mode(hdev, &param);
4872 }
4873 
4874 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4875 				  bool en_mc_pmc)
4876 {
4877 	struct hclge_vport *vport = hclge_get_vport(handle);
4878 	struct hclge_dev *hdev = vport->back;
4879 	bool en_bc_pmc = true;
4880 
4881 	/* For devices whose version is below V2, if broadcast promisc is
4882 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
4883 	 * should be disabled until the user enables promisc mode
4884 	 */
4885 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4886 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4887 
4888 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4889 					    en_bc_pmc);
4890 }
4891 
4892 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4893 {
4894 	struct hclge_vport *vport = hclge_get_vport(handle);
4895 	struct hclge_dev *hdev = vport->back;
4896 
4897 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4898 }
4899 
4900 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4901 {
4902 	struct hclge_get_fd_mode_cmd *req;
4903 	struct hclge_desc desc;
4904 	int ret;
4905 
4906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4907 
4908 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4909 
4910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4911 	if (ret) {
4912 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4913 		return ret;
4914 	}
4915 
4916 	*fd_mode = req->mode;
4917 
4918 	return ret;
4919 }
4920 
4921 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4922 				   u32 *stage1_entry_num,
4923 				   u32 *stage2_entry_num,
4924 				   u16 *stage1_counter_num,
4925 				   u16 *stage2_counter_num)
4926 {
4927 	struct hclge_get_fd_allocation_cmd *req;
4928 	struct hclge_desc desc;
4929 	int ret;
4930 
4931 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4932 
4933 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4934 
4935 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4936 	if (ret) {
4937 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4938 			ret);
4939 		return ret;
4940 	}
4941 
4942 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4943 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4944 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4945 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4946 
4947 	return ret;
4948 }
4949 
4950 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4951 				   enum HCLGE_FD_STAGE stage_num)
4952 {
4953 	struct hclge_set_fd_key_config_cmd *req;
4954 	struct hclge_fd_key_cfg *stage;
4955 	struct hclge_desc desc;
4956 	int ret;
4957 
4958 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4959 
4960 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4961 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4962 	req->stage = stage_num;
4963 	req->key_select = stage->key_sel;
4964 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4965 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4966 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4967 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4968 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4969 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4970 
4971 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4972 	if (ret)
4973 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4974 
4975 	return ret;
4976 }
4977 
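/* Initialize the flow director configuration: query the fd mode from
 * firmware, set up the stage-1 key (tuple and meta-data selection, with MAC
 * tuples only available in the 400-bit key mode), query the rule/counter
 * allocation and commit the stage-1 key configuration to hardware.
 */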
4978 static int hclge_init_fd_config(struct hclge_dev *hdev)
4979 {
4980 #define LOW_2_WORDS		0x03
4981 	struct hclge_fd_key_cfg *key_cfg;
4982 	int ret;
4983 
4984 	if (!hnae3_dev_fd_supported(hdev))
4985 		return 0;
4986 
4987 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4988 	if (ret)
4989 		return ret;
4990 
4991 	switch (hdev->fd_cfg.fd_mode) {
4992 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4993 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4994 		break;
4995 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4996 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4997 		break;
4998 	default:
4999 		dev_err(&hdev->pdev->dev,
5000 			"Unsupported flow director mode %u\n",
5001 			hdev->fd_cfg.fd_mode);
5002 		return -EOPNOTSUPP;
5003 	}
5004 
5005 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5006 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5007 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5008 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5009 	key_cfg->outer_sipv6_word_en = 0;
5010 	key_cfg->outer_dipv6_word_en = 0;
5011 
5012 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5013 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5014 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5015 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5016 
5017 	/* If the max 400-bit key is used, tuples for ether type can be supported */
5018 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5019 		key_cfg->tuple_active |=
5020 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5021 
5022 	/* roce_type is used to filter roce frames
5023 	 * dst_vport is used to specify the rule
5024 	 */
5025 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5026 
5027 	ret = hclge_get_fd_allocation(hdev,
5028 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5029 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5030 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5031 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5032 	if (ret)
5033 		return ret;
5034 
5035 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5036 }
5037 
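/* Write one flow director TCAM entry. The key is split across three command
 * descriptors; sel_x selects whether the X or the Y half of the key pair is
 * written, and is_add controls the entry valid bit.
 */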
5038 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5039 				int loc, u8 *key, bool is_add)
5040 {
5041 	struct hclge_fd_tcam_config_1_cmd *req1;
5042 	struct hclge_fd_tcam_config_2_cmd *req2;
5043 	struct hclge_fd_tcam_config_3_cmd *req3;
5044 	struct hclge_desc desc[3];
5045 	int ret;
5046 
5047 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5048 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5049 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5050 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5051 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5052 
5053 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5054 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5055 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5056 
5057 	req1->stage = stage;
5058 	req1->xy_sel = sel_x ? 1 : 0;
5059 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5060 	req1->index = cpu_to_le32(loc);
5061 	req1->entry_vld = sel_x ? is_add : 0;
5062 
5063 	if (key) {
5064 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5065 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5066 		       sizeof(req2->tcam_data));
5067 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5068 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5069 	}
5070 
5071 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5072 	if (ret)
5073 		dev_err(&hdev->pdev->dev,
5074 			"config tcam key fail, ret=%d\n",
5075 			ret);
5076 
5077 	return ret;
5078 }
5079 
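/* Configure the action data (AD) of a flow director rule: drop the packet or
 * forward it to a queue, optionally bind a counter, and write the rule id
 * back to the RX buffer descriptor.
 */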
5080 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5081 			      struct hclge_fd_ad_data *action)
5082 {
5083 	struct hclge_fd_ad_config_cmd *req;
5084 	struct hclge_desc desc;
5085 	u64 ad_data = 0;
5086 	int ret;
5087 
5088 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5089 
5090 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5091 	req->index = cpu_to_le32(loc);
5092 	req->stage = stage;
5093 
5094 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5095 		      action->write_rule_id_to_bd);
5096 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5097 			action->rule_id);
5098 	ad_data <<= 32;
5099 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5100 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5101 		      action->forward_to_direct_queue);
5102 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5103 			action->queue_id);
5104 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5105 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5106 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5107 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5108 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5109 			action->counter_id);
5110 
5111 	req->ad_data = cpu_to_le64(ad_data);
5112 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5113 	if (ret)
5114 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5115 
5116 	return ret;
5117 }
5118 
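/* Convert one tuple of the rule into its TCAM X/Y key pair using calc_x()
 * and calc_y() on the tuple value and mask. Returns true if the tuple was
 * handled (converted or marked unused), false otherwise.
 */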
5119 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5120 				   struct hclge_fd_rule *rule)
5121 {
5122 	u16 tmp_x_s, tmp_y_s;
5123 	u32 tmp_x_l, tmp_y_l;
5124 	int i;
5125 
5126 	if (rule->unused_tuple & tuple_bit)
5127 		return true;
5128 
5129 	switch (tuple_bit) {
5130 	case BIT(INNER_DST_MAC):
5131 		for (i = 0; i < ETH_ALEN; i++) {
5132 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5133 			       rule->tuples_mask.dst_mac[i]);
5134 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5135 			       rule->tuples_mask.dst_mac[i]);
5136 		}
5137 
5138 		return true;
5139 	case BIT(INNER_SRC_MAC):
5140 		for (i = 0; i < ETH_ALEN; i++) {
5141 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5142 			       rule->tuples_mask.src_mac[i]);
5143 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5144 			       rule->tuples_mask.src_mac[i]);
5145 		}
5146 
5147 		return true;
5148 	case BIT(INNER_VLAN_TAG_FST):
5149 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5150 		       rule->tuples_mask.vlan_tag1);
5151 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5152 		       rule->tuples_mask.vlan_tag1);
5153 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5154 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5155 
5156 		return true;
5157 	case BIT(INNER_ETH_TYPE):
5158 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5159 		       rule->tuples_mask.ether_proto);
5160 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5161 		       rule->tuples_mask.ether_proto);
5162 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5163 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5164 
5165 		return true;
5166 	case BIT(INNER_IP_TOS):
5167 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5168 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5169 
5170 		return true;
5171 	case BIT(INNER_IP_PROTO):
5172 		calc_x(*key_x, rule->tuples.ip_proto,
5173 		       rule->tuples_mask.ip_proto);
5174 		calc_y(*key_y, rule->tuples.ip_proto,
5175 		       rule->tuples_mask.ip_proto);
5176 
5177 		return true;
5178 	case BIT(INNER_SRC_IP):
5179 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5180 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5181 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5182 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5183 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5184 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5185 
5186 		return true;
5187 	case BIT(INNER_DST_IP):
5188 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5189 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5190 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5191 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5192 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5193 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5194 
5195 		return true;
5196 	case BIT(INNER_SRC_PORT):
5197 		calc_x(tmp_x_s, rule->tuples.src_port,
5198 		       rule->tuples_mask.src_port);
5199 		calc_y(tmp_y_s, rule->tuples.src_port,
5200 		       rule->tuples_mask.src_port);
5201 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5202 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5203 
5204 		return true;
5205 	case BIT(INNER_DST_PORT):
5206 		calc_x(tmp_x_s, rule->tuples.dst_port,
5207 		       rule->tuples_mask.dst_port);
5208 		calc_y(tmp_y_s, rule->tuples.dst_port,
5209 		       rule->tuples_mask.dst_port);
5210 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5211 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5212 
5213 		return true;
5214 	default:
5215 		return false;
5216 	}
5217 }
5218 
5219 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5220 				 u8 vf_id, u8 network_port_id)
5221 {
5222 	u32 port_number = 0;
5223 
5224 	if (port_type == HOST_PORT) {
5225 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5226 				pf_id);
5227 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5228 				vf_id);
5229 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5230 	} else {
5231 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5232 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5233 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5234 	}
5235 
5236 	return port_number;
5237 }
5238 
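/* Build the meta data part of the key (packet type and destination vport)
 * and convert it into its X/Y pair; the used bits are shifted up to the MSB
 * end of the 32-bit meta data word.
 */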
5239 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5240 				       __le32 *key_x, __le32 *key_y,
5241 				       struct hclge_fd_rule *rule)
5242 {
5243 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5244 	u8 cur_pos = 0, tuple_size, shift_bits;
5245 	unsigned int i;
5246 
5247 	for (i = 0; i < MAX_META_DATA; i++) {
5248 		tuple_size = meta_data_key_info[i].key_length;
5249 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5250 
5251 		switch (tuple_bit) {
5252 		case BIT(ROCE_TYPE):
5253 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5254 			cur_pos += tuple_size;
5255 			break;
5256 		case BIT(DST_VPORT):
5257 			port_number = hclge_get_port_number(HOST_PORT, 0,
5258 							    rule->vf_id, 0);
5259 			hnae3_set_field(meta_data,
5260 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
5261 					cur_pos, port_number);
5262 			cur_pos += tuple_size;
5263 			break;
5264 		default:
5265 			break;
5266 		}
5267 	}
5268 
5269 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5270 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5271 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5272 
5273 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5274 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5275 }
5276 
5277 /* A complete key is the combination of the meta data key and the tuple key.
5278  * The meta data key is stored in the MSB region and the tuple key in the
5279  * LSB region; unused bits are filled with 0.
5280  */
5281 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5282 			    struct hclge_fd_rule *rule)
5283 {
5284 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5285 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5286 	u8 *cur_key_x, *cur_key_y;
5287 	u8 meta_data_region;
5288 	u8 tuple_size;
5289 	int ret;
5290 	u32 i;
5291 
5292 	memset(key_x, 0, sizeof(key_x));
5293 	memset(key_y, 0, sizeof(key_y));
5294 	cur_key_x = key_x;
5295 	cur_key_y = key_y;
5296 
5297 	for (i = 0; i < MAX_TUPLE; i++) {
5298 		bool tuple_valid;
5299 		u32 check_tuple;
5300 
5301 		tuple_size = tuple_key_info[i].key_length / 8;
5302 		check_tuple = key_cfg->tuple_active & BIT(i);
5303 
5304 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5305 						     cur_key_y, rule);
5306 		if (tuple_valid) {
5307 			cur_key_x += tuple_size;
5308 			cur_key_y += tuple_size;
5309 		}
5310 	}
5311 
5312 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5313 			MAX_META_DATA_LENGTH / 8;
5314 
5315 	hclge_fd_convert_meta_data(key_cfg,
5316 				   (__le32 *)(key_x + meta_data_region),
5317 				   (__le32 *)(key_y + meta_data_region),
5318 				   rule);
5319 
5320 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5321 				   true);
5322 	if (ret) {
5323 		dev_err(&hdev->pdev->dev,
5324 			"fd key_y config fail, loc=%u, ret=%d\n",
5325 			rule->location, ret);
5326 		return ret;
5327 	}
5328 
5329 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5330 				   true);
5331 	if (ret)
5332 		dev_err(&hdev->pdev->dev,
5333 			"fd key_x config fail, loc=%u, ret=%d\n",
5334 			rule->location, ret);
5335 	return ret;
5336 }
5337 
5338 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5339 			       struct hclge_fd_rule *rule)
5340 {
5341 	struct hclge_fd_ad_data ad_data;
5342 
5343 	ad_data.ad_id = rule->location;
5344 
5345 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5346 		ad_data.drop_packet = true;
5347 		ad_data.forward_to_direct_queue = false;
5348 		ad_data.queue_id = 0;
5349 	} else {
5350 		ad_data.drop_packet = false;
5351 		ad_data.forward_to_direct_queue = true;
5352 		ad_data.queue_id = rule->queue_id;
5353 	}
5354 
5355 	ad_data.use_counter = false;
5356 	ad_data.counter_id = 0;
5357 
5358 	ad_data.use_next_stage = false;
5359 	ad_data.next_input_key = 0;
5360 
5361 	ad_data.write_rule_id_to_bd = true;
5362 	ad_data.rule_id = rule->location;
5363 
5364 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5365 }
5366 
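/* For a TCP/UDP/SCTP over IPv4 flow spec, mark the tuples the user left as
 * zero (wildcard) as unused.
 */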
5367 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5368 				       u32 *unused_tuple)
5369 {
5370 	if (!spec || !unused_tuple)
5371 		return -EINVAL;
5372 
5373 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5374 
5375 	if (!spec->ip4src)
5376 		*unused_tuple |= BIT(INNER_SRC_IP);
5377 
5378 	if (!spec->ip4dst)
5379 		*unused_tuple |= BIT(INNER_DST_IP);
5380 
5381 	if (!spec->psrc)
5382 		*unused_tuple |= BIT(INNER_SRC_PORT);
5383 
5384 	if (!spec->pdst)
5385 		*unused_tuple |= BIT(INNER_DST_PORT);
5386 
5387 	if (!spec->tos)
5388 		*unused_tuple |= BIT(INNER_IP_TOS);
5389 
5390 	return 0;
5391 }
5392 
5393 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5394 				    u32 *unused_tuple)
5395 {
5396 	if (!spec || !unused_tuple)
5397 		return -EINVAL;
5398 
5399 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5400 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5401 
5402 	if (!spec->ip4src)
5403 		*unused_tuple |= BIT(INNER_SRC_IP);
5404 
5405 	if (!spec->ip4dst)
5406 		*unused_tuple |= BIT(INNER_DST_IP);
5407 
5408 	if (!spec->tos)
5409 		*unused_tuple |= BIT(INNER_IP_TOS);
5410 
5411 	if (!spec->proto)
5412 		*unused_tuple |= BIT(INNER_IP_PROTO);
5413 
5414 	if (spec->l4_4_bytes)
5415 		return -EOPNOTSUPP;
5416 
5417 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5418 		return -EOPNOTSUPP;
5419 
5420 	return 0;
5421 }
5422 
5423 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5424 				       u32 *unused_tuple)
5425 {
5426 	if (!spec || !unused_tuple)
5427 		return -EINVAL;
5428 
5429 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5430 		BIT(INNER_IP_TOS);
5431 
5432 	/* check whether the src/dst ip addresses are used */
5433 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5434 	    !spec->ip6src[2] && !spec->ip6src[3])
5435 		*unused_tuple |= BIT(INNER_SRC_IP);
5436 
5437 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5438 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5439 		*unused_tuple |= BIT(INNER_DST_IP);
5440 
5441 	if (!spec->psrc)
5442 		*unused_tuple |= BIT(INNER_SRC_PORT);
5443 
5444 	if (!spec->pdst)
5445 		*unused_tuple |= BIT(INNER_DST_PORT);
5446 
5447 	if (spec->tclass)
5448 		return -EOPNOTSUPP;
5449 
5450 	return 0;
5451 }
5452 
5453 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5454 				    u32 *unused_tuple)
5455 {
5456 	if (!spec || !unused_tuple)
5457 		return -EINVAL;
5458 
5459 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5460 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5461 
5462 	/* check whether the src/dst ip addresses are used */
5463 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5464 	    !spec->ip6src[2] && !spec->ip6src[3])
5465 		*unused_tuple |= BIT(INNER_SRC_IP);
5466 
5467 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5468 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5469 		*unused_tuple |= BIT(INNER_DST_IP);
5470 
5471 	if (!spec->l4_proto)
5472 		*unused_tuple |= BIT(INNER_IP_PROTO);
5473 
5474 	if (spec->tclass)
5475 		return -EOPNOTSUPP;
5476 
5477 	if (spec->l4_4_bytes)
5478 		return -EOPNOTSUPP;
5479 
5480 	return 0;
5481 }
5482 
5483 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5484 {
5485 	if (!spec || !unused_tuple)
5486 		return -EINVAL;
5487 
5488 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5489 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5490 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5491 
5492 	if (is_zero_ether_addr(spec->h_source))
5493 		*unused_tuple |= BIT(INNER_SRC_MAC);
5494 
5495 	if (is_zero_ether_addr(spec->h_dest))
5496 		*unused_tuple |= BIT(INNER_DST_MAC);
5497 
5498 	if (!spec->h_proto)
5499 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5500 
5501 	return 0;
5502 }
5503 
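/* Validate the FLOW_EXT and FLOW_MAC_EXT parts of the flow spec and mark the
 * vlan tag and destination mac tuples as unused when they are not given.
 */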
5504 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5505 				    struct ethtool_rx_flow_spec *fs,
5506 				    u32 *unused_tuple)
5507 {
5508 	if (fs->flow_type & FLOW_EXT) {
5509 		if (fs->h_ext.vlan_etype) {
5510 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5511 			return -EOPNOTSUPP;
5512 		}
5513 
5514 		if (!fs->h_ext.vlan_tci)
5515 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5516 
5517 		if (fs->m_ext.vlan_tci &&
5518 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5519 			dev_err(&hdev->pdev->dev,
5520 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5521 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5522 			return -EINVAL;
5523 		}
5524 	} else {
5525 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5526 	}
5527 
5528 	if (fs->flow_type & FLOW_MAC_EXT) {
5529 		if (hdev->fd_cfg.fd_mode !=
5530 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5531 			dev_err(&hdev->pdev->dev,
5532 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5533 			return -EOPNOTSUPP;
5534 		}
5535 
5536 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5537 			*unused_tuple |= BIT(INNER_DST_MAC);
5538 		else
5539 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5540 	}
5541 
5542 	return 0;
5543 }
5544 
5545 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5546 			       struct ethtool_rx_flow_spec *fs,
5547 			       u32 *unused_tuple)
5548 {
5549 	u32 flow_type;
5550 	int ret;
5551 
5552 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5553 		dev_err(&hdev->pdev->dev,
5554 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5555 			fs->location,
5556 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5557 		return -EINVAL;
5558 	}
5559 
5560 	if ((fs->flow_type & FLOW_EXT) &&
5561 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5562 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5563 		return -EOPNOTSUPP;
5564 	}
5565 
5566 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5567 	switch (flow_type) {
5568 	case SCTP_V4_FLOW:
5569 	case TCP_V4_FLOW:
5570 	case UDP_V4_FLOW:
5571 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5572 						  unused_tuple);
5573 		break;
5574 	case IP_USER_FLOW:
5575 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5576 					       unused_tuple);
5577 		break;
5578 	case SCTP_V6_FLOW:
5579 	case TCP_V6_FLOW:
5580 	case UDP_V6_FLOW:
5581 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5582 						  unused_tuple);
5583 		break;
5584 	case IPV6_USER_FLOW:
5585 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5586 					       unused_tuple);
5587 		break;
5588 	case ETHER_FLOW:
5589 		if (hdev->fd_cfg.fd_mode !=
5590 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5591 			dev_err(&hdev->pdev->dev,
5592 				"ETHER_FLOW is not supported in current fd mode!\n");
5593 			return -EOPNOTSUPP;
5594 		}
5595 
5596 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5597 						 unused_tuple);
5598 		break;
5599 	default:
5600 		dev_err(&hdev->pdev->dev,
5601 			"unsupported protocol type, protocol type = %#x\n",
5602 			flow_type);
5603 		return -EOPNOTSUPP;
5604 	}
5605 
5606 	if (ret) {
5607 		dev_err(&hdev->pdev->dev,
5608 			"failed to check flow union tuple, ret = %d\n",
5609 			ret);
5610 		return ret;
5611 	}
5612 
5613 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5614 }
5615 
5616 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5617 {
5618 	struct hclge_fd_rule *rule = NULL;
5619 	struct hlist_node *node2;
5620 
5621 	spin_lock_bh(&hdev->fd_rule_lock);
5622 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5623 		if (rule->location >= location)
5624 			break;
5625 	}
5626 
5627 	spin_unlock_bh(&hdev->fd_rule_lock);
5628 
5629 	return rule && rule->location == location;
5630 }
5631 
5632 /* the caller must hold fd_rule_lock */
5633 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5634 				     struct hclge_fd_rule *new_rule,
5635 				     u16 location,
5636 				     bool is_add)
5637 {
5638 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5639 	struct hlist_node *node2;
5640 
5641 	if (is_add && !new_rule)
5642 		return -EINVAL;
5643 
5644 	hlist_for_each_entry_safe(rule, node2,
5645 				  &hdev->fd_rule_list, rule_node) {
5646 		if (rule->location >= location)
5647 			break;
5648 		parent = rule;
5649 	}
5650 
5651 	if (rule && rule->location == location) {
5652 		hlist_del(&rule->rule_node);
5653 		kfree(rule);
5654 		hdev->hclge_fd_rule_num--;
5655 
5656 		if (!is_add) {
5657 			if (!hdev->hclge_fd_rule_num)
5658 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5659 			clear_bit(location, hdev->fd_bmap);
5660 
5661 			return 0;
5662 		}
5663 	} else if (!is_add) {
5664 		dev_err(&hdev->pdev->dev,
5665 			"delete fail, rule %u does not exist\n",
5666 			location);
5667 		return -EINVAL;
5668 	}
5669 
5670 	INIT_HLIST_NODE(&new_rule->rule_node);
5671 
5672 	if (parent)
5673 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5674 	else
5675 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5676 
5677 	set_bit(location, hdev->fd_bmap);
5678 	hdev->hclge_fd_rule_num++;
5679 	hdev->fd_active_type = new_rule->rule_type;
5680 
5681 	return 0;
5682 }
5683 
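/* Translate the ethtool flow spec into the driver's internal rule tuples and
 * masks, converting all fields to CPU byte order.
 */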
5684 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5685 			      struct ethtool_rx_flow_spec *fs,
5686 			      struct hclge_fd_rule *rule)
5687 {
5688 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5689 
5690 	switch (flow_type) {
5691 	case SCTP_V4_FLOW:
5692 	case TCP_V4_FLOW:
5693 	case UDP_V4_FLOW:
5694 		rule->tuples.src_ip[IPV4_INDEX] =
5695 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5696 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5697 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5698 
5699 		rule->tuples.dst_ip[IPV4_INDEX] =
5700 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5701 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5702 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5703 
5704 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5705 		rule->tuples_mask.src_port =
5706 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5707 
5708 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5709 		rule->tuples_mask.dst_port =
5710 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5711 
5712 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5713 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5714 
5715 		rule->tuples.ether_proto = ETH_P_IP;
5716 		rule->tuples_mask.ether_proto = 0xFFFF;
5717 
5718 		break;
5719 	case IP_USER_FLOW:
5720 		rule->tuples.src_ip[IPV4_INDEX] =
5721 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5722 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5723 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5724 
5725 		rule->tuples.dst_ip[IPV4_INDEX] =
5726 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5727 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5728 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5729 
5730 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5731 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5732 
5733 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5734 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5735 
5736 		rule->tuples.ether_proto = ETH_P_IP;
5737 		rule->tuples_mask.ether_proto = 0xFFFF;
5738 
5739 		break;
5740 	case SCTP_V6_FLOW:
5741 	case TCP_V6_FLOW:
5742 	case UDP_V6_FLOW:
5743 		be32_to_cpu_array(rule->tuples.src_ip,
5744 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5745 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5746 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5747 
5748 		be32_to_cpu_array(rule->tuples.dst_ip,
5749 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5750 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5751 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5752 
5753 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5754 		rule->tuples_mask.src_port =
5755 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5756 
5757 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5758 		rule->tuples_mask.dst_port =
5759 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5760 
5761 		rule->tuples.ether_proto = ETH_P_IPV6;
5762 		rule->tuples_mask.ether_proto = 0xFFFF;
5763 
5764 		break;
5765 	case IPV6_USER_FLOW:
5766 		be32_to_cpu_array(rule->tuples.src_ip,
5767 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5768 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5769 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5770 
5771 		be32_to_cpu_array(rule->tuples.dst_ip,
5772 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5773 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5774 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5775 
5776 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5777 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5778 
5779 		rule->tuples.ether_proto = ETH_P_IPV6;
5780 		rule->tuples_mask.ether_proto = 0xFFFF;
5781 
5782 		break;
5783 	case ETHER_FLOW:
5784 		ether_addr_copy(rule->tuples.src_mac,
5785 				fs->h_u.ether_spec.h_source);
5786 		ether_addr_copy(rule->tuples_mask.src_mac,
5787 				fs->m_u.ether_spec.h_source);
5788 
5789 		ether_addr_copy(rule->tuples.dst_mac,
5790 				fs->h_u.ether_spec.h_dest);
5791 		ether_addr_copy(rule->tuples_mask.dst_mac,
5792 				fs->m_u.ether_spec.h_dest);
5793 
5794 		rule->tuples.ether_proto =
5795 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5796 		rule->tuples_mask.ether_proto =
5797 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5798 
5799 		break;
5800 	default:
5801 		return -EOPNOTSUPP;
5802 	}
5803 
5804 	switch (flow_type) {
5805 	case SCTP_V4_FLOW:
5806 	case SCTP_V6_FLOW:
5807 		rule->tuples.ip_proto = IPPROTO_SCTP;
5808 		rule->tuples_mask.ip_proto = 0xFF;
5809 		break;
5810 	case TCP_V4_FLOW:
5811 	case TCP_V6_FLOW:
5812 		rule->tuples.ip_proto = IPPROTO_TCP;
5813 		rule->tuples_mask.ip_proto = 0xFF;
5814 		break;
5815 	case UDP_V4_FLOW:
5816 	case UDP_V6_FLOW:
5817 		rule->tuples.ip_proto = IPPROTO_UDP;
5818 		rule->tuples_mask.ip_proto = 0xFF;
5819 		break;
5820 	default:
5821 		break;
5822 	}
5823 
5824 	if (fs->flow_type & FLOW_EXT) {
5825 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5826 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5827 	}
5828 
5829 	if (fs->flow_type & FLOW_MAC_EXT) {
5830 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5831 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5832 	}
5833 
5834 	return 0;
5835 }
5836 
5837 /* the caller must hold fd_rule_lock */
5838 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5839 				struct hclge_fd_rule *rule)
5840 {
5841 	int ret;
5842 
5843 	if (!rule) {
5844 		dev_err(&hdev->pdev->dev,
5845 			"The flow director rule is NULL\n");
5846 		return -EINVAL;
5847 	}
5848 
5849 	/* it never fails here, so there is no need to check the return value */
5850 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5851 
5852 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5853 	if (ret)
5854 		goto clear_rule;
5855 
5856 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5857 	if (ret)
5858 		goto clear_rule;
5859 
5860 	return 0;
5861 
5862 clear_rule:
5863 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5864 	return ret;
5865 }
5866 
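/* Add a flow director rule requested through ethtool: validate the flow
 * spec, build a rule from it and program the rule into the hardware.
 */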
5867 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5868 			      struct ethtool_rxnfc *cmd)
5869 {
5870 	struct hclge_vport *vport = hclge_get_vport(handle);
5871 	struct hclge_dev *hdev = vport->back;
5872 	u16 dst_vport_id = 0, q_index = 0;
5873 	struct ethtool_rx_flow_spec *fs;
5874 	struct hclge_fd_rule *rule;
5875 	u32 unused = 0;
5876 	u8 action;
5877 	int ret;
5878 
5879 	if (!hnae3_dev_fd_supported(hdev)) {
5880 		dev_err(&hdev->pdev->dev,
5881 			"flow director is not supported\n");
5882 		return -EOPNOTSUPP;
5883 	}
5884 
5885 	if (!hdev->fd_en) {
5886 		dev_err(&hdev->pdev->dev,
5887 			"please enable flow director first\n");
5888 		return -EOPNOTSUPP;
5889 	}
5890 
5891 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5892 
5893 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5894 	if (ret)
5895 		return ret;
5896 
5897 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5898 		action = HCLGE_FD_ACTION_DROP_PACKET;
5899 	} else {
5900 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5901 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5902 		u16 tqps;
5903 
5904 		if (vf > hdev->num_req_vfs) {
5905 			dev_err(&hdev->pdev->dev,
5906 				"Error: vf id (%u) > max vf num (%u)\n",
5907 				vf, hdev->num_req_vfs);
5908 			return -EINVAL;
5909 		}
5910 
5911 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5912 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5913 
5914 		if (ring >= tqps) {
5915 			dev_err(&hdev->pdev->dev,
5916 				"Error: queue id (%u) > max tqp num (%u)\n",
5917 				ring, tqps - 1);
5918 			return -EINVAL;
5919 		}
5920 
5921 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5922 		q_index = ring;
5923 	}
5924 
5925 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5926 	if (!rule)
5927 		return -ENOMEM;
5928 
5929 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5930 	if (ret) {
5931 		kfree(rule);
5932 		return ret;
5933 	}
5934 
5935 	rule->flow_type = fs->flow_type;
5936 	rule->location = fs->location;
5937 	rule->unused_tuple = unused;
5938 	rule->vf_id = dst_vport_id;
5939 	rule->queue_id = q_index;
5940 	rule->action = action;
5941 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5942 
5943 	/* to avoid rule conflicts, clear all arfs rules when the user
5944 	 * configures a rule via ethtool
5945 	 */
5946 	spin_lock_bh(&hdev->fd_rule_lock);
5947 	hclge_clear_arfs_rules(handle);
5948 
5949 	ret = hclge_fd_config_rule(hdev, rule);
5950 
5951 	spin_unlock_bh(&hdev->fd_rule_lock);
5952 
5953 	return ret;
5954 }
5955 
5956 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5957 			      struct ethtool_rxnfc *cmd)
5958 {
5959 	struct hclge_vport *vport = hclge_get_vport(handle);
5960 	struct hclge_dev *hdev = vport->back;
5961 	struct ethtool_rx_flow_spec *fs;
5962 	int ret;
5963 
5964 	if (!hnae3_dev_fd_supported(hdev))
5965 		return -EOPNOTSUPP;
5966 
5967 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5968 
5969 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5970 		return -EINVAL;
5971 
5972 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5973 		dev_err(&hdev->pdev->dev,
5974 			"Delete fail, rule %u does not exist\n", fs->location);
5975 		return -ENOENT;
5976 	}
5977 
5978 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5979 				   NULL, false);
5980 	if (ret)
5981 		return ret;
5982 
5983 	spin_lock_bh(&hdev->fd_rule_lock);
5984 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5985 
5986 	spin_unlock_bh(&hdev->fd_rule_lock);
5987 
5988 	return ret;
5989 }
5990 
5991 /* the caller must hold fd_rule_lock */
5992 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5993 				     bool clear_list)
5994 {
5995 	struct hclge_vport *vport = hclge_get_vport(handle);
5996 	struct hclge_dev *hdev = vport->back;
5997 	struct hclge_fd_rule *rule;
5998 	struct hlist_node *node;
5999 	u16 location;
6000 
6001 	if (!hnae3_dev_fd_supported(hdev))
6002 		return;
6003 
6004 	for_each_set_bit(location, hdev->fd_bmap,
6005 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6006 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6007 				     NULL, false);
6008 
6009 	if (clear_list) {
6010 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6011 					  rule_node) {
6012 			hlist_del(&rule->rule_node);
6013 			kfree(rule);
6014 		}
6015 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6016 		hdev->hclge_fd_rule_num = 0;
6017 		bitmap_zero(hdev->fd_bmap,
6018 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6019 	}
6020 }
6021 
6022 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6023 {
6024 	struct hclge_vport *vport = hclge_get_vport(handle);
6025 	struct hclge_dev *hdev = vport->back;
6026 	struct hclge_fd_rule *rule;
6027 	struct hlist_node *node;
6028 	int ret;
6029 
6030 	/* Return ok here, because reset error handling will check this
6031 	 * return value. If error is returned here, the reset process will
6032 	 * fail.
6033 	 */
6034 	if (!hnae3_dev_fd_supported(hdev))
6035 		return 0;
6036 
6037 	/* if fd is disabled, the rules should not be restored during reset */
6038 	if (!hdev->fd_en)
6039 		return 0;
6040 
6041 	spin_lock_bh(&hdev->fd_rule_lock);
6042 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6043 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6044 		if (!ret)
6045 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6046 
6047 		if (ret) {
6048 			dev_warn(&hdev->pdev->dev,
6049 				 "Restore rule %u failed, remove it\n",
6050 				 rule->location);
6051 			clear_bit(rule->location, hdev->fd_bmap);
6052 			hlist_del(&rule->rule_node);
6053 			kfree(rule);
6054 			hdev->hclge_fd_rule_num--;
6055 		}
6056 	}
6057 
6058 	if (hdev->hclge_fd_rule_num)
6059 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6060 
6061 	spin_unlock_bh(&hdev->fd_rule_lock);
6062 
6063 	return 0;
6064 }
6065 
6066 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6067 				 struct ethtool_rxnfc *cmd)
6068 {
6069 	struct hclge_vport *vport = hclge_get_vport(handle);
6070 	struct hclge_dev *hdev = vport->back;
6071 
6072 	if (!hnae3_dev_fd_supported(hdev))
6073 		return -EOPNOTSUPP;
6074 
6075 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6076 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6077 
6078 	return 0;
6079 }
6080 
6081 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6082 				     struct ethtool_tcpip4_spec *spec,
6083 				     struct ethtool_tcpip4_spec *spec_mask)
6084 {
6085 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6086 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6087 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6088 
6089 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6090 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6091 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6092 
6093 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6094 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6095 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6096 
6097 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6098 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6099 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6100 
6101 	spec->tos = rule->tuples.ip_tos;
6102 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6103 			0 : rule->tuples_mask.ip_tos;
6104 }
6105 
6106 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6107 				  struct ethtool_usrip4_spec *spec,
6108 				  struct ethtool_usrip4_spec *spec_mask)
6109 {
6110 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6111 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6112 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6113 
6114 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6115 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6116 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6117 
6118 	spec->tos = rule->tuples.ip_tos;
6119 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6120 			0 : rule->tuples_mask.ip_tos;
6121 
6122 	spec->proto = rule->tuples.ip_proto;
6123 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6124 			0 : rule->tuples_mask.ip_proto;
6125 
6126 	spec->ip_ver = ETH_RX_NFC_IP4;
6127 }
6128 
6129 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6130 				     struct ethtool_tcpip6_spec *spec,
6131 				     struct ethtool_tcpip6_spec *spec_mask)
6132 {
6133 	cpu_to_be32_array(spec->ip6src,
6134 			  rule->tuples.src_ip, IPV6_SIZE);
6135 	cpu_to_be32_array(spec->ip6dst,
6136 			  rule->tuples.dst_ip, IPV6_SIZE);
6137 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6138 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6139 	else
6140 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6141 				  IPV6_SIZE);
6142 
6143 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6144 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6145 	else
6146 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6147 				  IPV6_SIZE);
6148 
6149 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6150 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6151 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6152 
6153 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6154 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6155 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6156 }
6157 
6158 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6159 				  struct ethtool_usrip6_spec *spec,
6160 				  struct ethtool_usrip6_spec *spec_mask)
6161 {
6162 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6163 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6164 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6165 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6166 	else
6167 		cpu_to_be32_array(spec_mask->ip6src,
6168 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6169 
6170 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6171 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6172 	else
6173 		cpu_to_be32_array(spec_mask->ip6dst,
6174 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6175 
6176 	spec->l4_proto = rule->tuples.ip_proto;
6177 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6178 			0 : rule->tuples_mask.ip_proto;
6179 }
6180 
6181 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6182 				    struct ethhdr *spec,
6183 				    struct ethhdr *spec_mask)
6184 {
6185 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6186 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6187 
6188 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6189 		eth_zero_addr(spec_mask->h_source);
6190 	else
6191 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6192 
6193 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6194 		eth_zero_addr(spec_mask->h_dest);
6195 	else
6196 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6197 
6198 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6199 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6200 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6201 }
6202 
6203 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6204 				  struct hclge_fd_rule *rule)
6205 {
6206 	if (fs->flow_type & FLOW_EXT) {
6207 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6208 		fs->m_ext.vlan_tci =
6209 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6210 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6211 	}
6212 
6213 	if (fs->flow_type & FLOW_MAC_EXT) {
6214 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6215 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6216 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6217 		else
6218 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6219 					rule->tuples_mask.dst_mac);
6220 	}
6221 }
6222 
6223 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6224 				  struct ethtool_rxnfc *cmd)
6225 {
6226 	struct hclge_vport *vport = hclge_get_vport(handle);
6227 	struct hclge_fd_rule *rule = NULL;
6228 	struct hclge_dev *hdev = vport->back;
6229 	struct ethtool_rx_flow_spec *fs;
6230 	struct hlist_node *node2;
6231 
6232 	if (!hnae3_dev_fd_supported(hdev))
6233 		return -EOPNOTSUPP;
6234 
6235 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6236 
6237 	spin_lock_bh(&hdev->fd_rule_lock);
6238 
6239 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6240 		if (rule->location >= fs->location)
6241 			break;
6242 	}
6243 
6244 	if (!rule || fs->location != rule->location) {
6245 		spin_unlock_bh(&hdev->fd_rule_lock);
6246 
6247 		return -ENOENT;
6248 	}
6249 
6250 	fs->flow_type = rule->flow_type;
6251 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6252 	case SCTP_V4_FLOW:
6253 	case TCP_V4_FLOW:
6254 	case UDP_V4_FLOW:
6255 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6256 					 &fs->m_u.tcp_ip4_spec);
6257 		break;
6258 	case IP_USER_FLOW:
6259 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6260 				      &fs->m_u.usr_ip4_spec);
6261 		break;
6262 	case SCTP_V6_FLOW:
6263 	case TCP_V6_FLOW:
6264 	case UDP_V6_FLOW:
6265 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6266 					 &fs->m_u.tcp_ip6_spec);
6267 		break;
6268 	case IPV6_USER_FLOW:
6269 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6270 				      &fs->m_u.usr_ip6_spec);
6271 		break;
6272 	/* The flow type of the fd rule was checked before it was added to the
6273 	 * rule list. As all other flow types have been handled above, the
6274 	 * default case must be ETHER_FLOW
6275 	 */
6276 	default:
6277 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6278 					&fs->m_u.ether_spec);
6279 		break;
6280 	}
6281 
6282 	hclge_fd_get_ext_info(fs, rule);
6283 
6284 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6285 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6286 	} else {
6287 		u64 vf_id;
6288 
6289 		fs->ring_cookie = rule->queue_id;
6290 		vf_id = rule->vf_id;
6291 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6292 		fs->ring_cookie |= vf_id;
6293 	}
6294 
6295 	spin_unlock_bh(&hdev->fd_rule_lock);
6296 
6297 	return 0;
6298 }
6299 
6300 static int hclge_get_all_rules(struct hnae3_handle *handle,
6301 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6302 {
6303 	struct hclge_vport *vport = hclge_get_vport(handle);
6304 	struct hclge_dev *hdev = vport->back;
6305 	struct hclge_fd_rule *rule;
6306 	struct hlist_node *node2;
6307 	int cnt = 0;
6308 
6309 	if (!hnae3_dev_fd_supported(hdev))
6310 		return -EOPNOTSUPP;
6311 
6312 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6313 
6314 	spin_lock_bh(&hdev->fd_rule_lock);
6315 	hlist_for_each_entry_safe(rule, node2,
6316 				  &hdev->fd_rule_list, rule_node) {
6317 		if (cnt == cmd->rule_cnt) {
6318 			spin_unlock_bh(&hdev->fd_rule_lock);
6319 			return -EMSGSIZE;
6320 		}
6321 
6322 		rule_locs[cnt] = rule->location;
6323 		cnt++;
6324 	}
6325 
6326 	spin_unlock_bh(&hdev->fd_rule_lock);
6327 
6328 	cmd->rule_cnt = cnt;
6329 
6330 	return 0;
6331 }
6332 
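/* Extract the rule tuples used by aRFS from the dissected flow keys. */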
6333 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6334 				     struct hclge_fd_rule_tuples *tuples)
6335 {
6336 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6337 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6338 
6339 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6340 	tuples->ip_proto = fkeys->basic.ip_proto;
6341 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6342 
6343 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6344 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6345 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6346 	} else {
6347 		int i;
6348 
6349 		for (i = 0; i < IPV6_SIZE; i++) {
6350 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6351 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6352 		}
6353 	}
6354 }
6355 
6356 /* traverse all rules and check whether an existing rule has the same tuples */
6357 static struct hclge_fd_rule *
6358 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6359 			  const struct hclge_fd_rule_tuples *tuples)
6360 {
6361 	struct hclge_fd_rule *rule = NULL;
6362 	struct hlist_node *node;
6363 
6364 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6365 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6366 			return rule;
6367 	}
6368 
6369 	return NULL;
6370 }
6371 
6372 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6373 				     struct hclge_fd_rule *rule)
6374 {
6375 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6376 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6377 			     BIT(INNER_SRC_PORT);
6378 	rule->action = 0;
6379 	rule->vf_id = 0;
6380 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6381 	if (tuples->ether_proto == ETH_P_IP) {
6382 		if (tuples->ip_proto == IPPROTO_TCP)
6383 			rule->flow_type = TCP_V4_FLOW;
6384 		else
6385 			rule->flow_type = UDP_V4_FLOW;
6386 	} else {
6387 		if (tuples->ip_proto == IPPROTO_TCP)
6388 			rule->flow_type = TCP_V6_FLOW;
6389 		else
6390 			rule->flow_type = UDP_V6_FLOW;
6391 	}
6392 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6393 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6394 }
6395 
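/* aRFS entry: find or create a flow director rule for the given flow and
 * steer it to queue_id. Returns the rule location (used as the aRFS filter
 * id) on success, or a negative error code.
 */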
6396 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6397 				      u16 flow_id, struct flow_keys *fkeys)
6398 {
6399 	struct hclge_vport *vport = hclge_get_vport(handle);
6400 	struct hclge_fd_rule_tuples new_tuples = {};
6401 	struct hclge_dev *hdev = vport->back;
6402 	struct hclge_fd_rule *rule;
6403 	u16 tmp_queue_id;
6404 	u16 bit_id;
6405 	int ret;
6406 
6407 	if (!hnae3_dev_fd_supported(hdev))
6408 		return -EOPNOTSUPP;
6409 
6410 	/* when a fd rule added by the user already exists,
6411 	 * arfs should not take effect
6412 	 */
6413 	spin_lock_bh(&hdev->fd_rule_lock);
6414 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6415 		spin_unlock_bh(&hdev->fd_rule_lock);
6416 		return -EOPNOTSUPP;
6417 	}
6418 
6419 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6420 
6421 	/* check whether a flow director filter already exists for this flow:
6422 	 * if not, create a new filter for it;
6423 	 * if a filter exists with a different queue id, modify the filter;
6424 	 * if a filter exists with the same queue id, do nothing
6425 	 */
6426 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6427 	if (!rule) {
6428 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6429 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6430 			spin_unlock_bh(&hdev->fd_rule_lock);
6431 			return -ENOSPC;
6432 		}
6433 
6434 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6435 		if (!rule) {
6436 			spin_unlock_bh(&hdev->fd_rule_lock);
6437 			return -ENOMEM;
6438 		}
6439 
6440 		set_bit(bit_id, hdev->fd_bmap);
6441 		rule->location = bit_id;
6442 		rule->flow_id = flow_id;
6443 		rule->queue_id = queue_id;
6444 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6445 		ret = hclge_fd_config_rule(hdev, rule);
6446 
6447 		spin_unlock_bh(&hdev->fd_rule_lock);
6448 
6449 		if (ret)
6450 			return ret;
6451 
6452 		return rule->location;
6453 	}
6454 
6455 	spin_unlock_bh(&hdev->fd_rule_lock);
6456 
6457 	if (rule->queue_id == queue_id)
6458 		return rule->location;
6459 
6460 	tmp_queue_id = rule->queue_id;
6461 	rule->queue_id = queue_id;
6462 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6463 	if (ret) {
6464 		rule->queue_id = tmp_queue_id;
6465 		return ret;
6466 	}
6467 
6468 	return rule->location;
6469 }
6470 
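/* Expire aRFS rules the stack no longer needs: collect them under
 * fd_rule_lock, then invalidate the TCAM entries and free the rules.
 */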
6471 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6472 {
6473 #ifdef CONFIG_RFS_ACCEL
6474 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6475 	struct hclge_fd_rule *rule;
6476 	struct hlist_node *node;
6477 	HLIST_HEAD(del_list);
6478 
6479 	spin_lock_bh(&hdev->fd_rule_lock);
6480 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6481 		spin_unlock_bh(&hdev->fd_rule_lock);
6482 		return;
6483 	}
6484 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6485 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6486 					rule->flow_id, rule->location)) {
6487 			hlist_del_init(&rule->rule_node);
6488 			hlist_add_head(&rule->rule_node, &del_list);
6489 			hdev->hclge_fd_rule_num--;
6490 			clear_bit(rule->location, hdev->fd_bmap);
6491 		}
6492 	}
6493 	spin_unlock_bh(&hdev->fd_rule_lock);
6494 
6495 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6496 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6497 				     rule->location, NULL, false);
6498 		kfree(rule);
6499 	}
6500 #endif
6501 }
6502 
6503 /* the caller must hold fd_rule_lock */
6504 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6505 {
6506 #ifdef CONFIG_RFS_ACCEL
6507 	struct hclge_vport *vport = hclge_get_vport(handle);
6508 	struct hclge_dev *hdev = vport->back;
6509 
6510 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6511 		hclge_del_all_fd_entries(handle, true);
6512 #endif
6513 }
6514 
6515 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6516 {
6517 	struct hclge_vport *vport = hclge_get_vport(handle);
6518 	struct hclge_dev *hdev = vport->back;
6519 
6520 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6521 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6522 }
6523 
6524 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6525 {
6526 	struct hclge_vport *vport = hclge_get_vport(handle);
6527 	struct hclge_dev *hdev = vport->back;
6528 
6529 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6530 }
6531 
6532 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6533 {
6534 	struct hclge_vport *vport = hclge_get_vport(handle);
6535 	struct hclge_dev *hdev = vport->back;
6536 
6537 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6538 }
6539 
6540 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6541 {
6542 	struct hclge_vport *vport = hclge_get_vport(handle);
6543 	struct hclge_dev *hdev = vport->back;
6544 
6545 	return hdev->rst_stats.hw_reset_done_cnt;
6546 }
6547 
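/* Enable or disable the flow director. Disabling removes the rules from the
 * hardware (and drops aRFS rules from the list); enabling restores the
 * remembered rules.
 */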
6548 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6549 {
6550 	struct hclge_vport *vport = hclge_get_vport(handle);
6551 	struct hclge_dev *hdev = vport->back;
6552 	bool clear;
6553 
6554 	hdev->fd_en = enable;
6555 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6556 
6557 	if (!enable) {
6558 		spin_lock_bh(&hdev->fd_rule_lock);
6559 		hclge_del_all_fd_entries(handle, clear);
6560 		spin_unlock_bh(&hdev->fd_rule_lock);
6561 	} else {
6562 		hclge_restore_fd_entries(handle);
6563 	}
6564 }
6565 
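/* Enable or disable the MAC TX/RX path. When disabling, wait for the MAC
 * link status to report down before returning.
 */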
6566 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6567 {
6568 #define HCLGE_LINK_STATUS_WAIT_CNT  3
6569 
6570 	struct hclge_desc desc;
6571 	struct hclge_config_mac_mode_cmd *req =
6572 		(struct hclge_config_mac_mode_cmd *)desc.data;
6573 	u32 loop_en = 0;
6574 	int ret;
6575 
6576 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6577 
6578 	if (enable) {
6579 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6580 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6581 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6582 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6583 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6584 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6585 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6586 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6587 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6588 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6589 	}
6590 
6591 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6592 
6593 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6594 	if (ret) {
6595 		dev_err(&hdev->pdev->dev,
6596 			"mac enable fail, ret =%d.\n", ret);
6597 		return;
6598 	}
6599 
6600 	if (!enable)
6601 		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
6602 					   HCLGE_LINK_STATUS_WAIT_CNT);
6603 }
6604 
6605 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6606 				     u8 switch_param, u8 param_mask)
6607 {
6608 	struct hclge_mac_vlan_switch_cmd *req;
6609 	struct hclge_desc desc;
6610 	u32 func_id;
6611 	int ret;
6612 
6613 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6614 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6615 
6616 	/* read current config parameter */
6617 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6618 				   true);
6619 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6620 	req->func_id = cpu_to_le32(func_id);
6621 
6622 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6623 	if (ret) {
6624 		dev_err(&hdev->pdev->dev,
6625 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6626 		return ret;
6627 	}
6628 
6629 	/* modify and write new config parameter */
6630 	hclge_cmd_reuse_desc(&desc, false);
6631 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6632 	req->param_mask = param_mask;
6633 
6634 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6635 	if (ret)
6636 		dev_err(&hdev->pdev->dev,
6637 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6638 	return ret;
6639 }
6640 
6641 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6642 				       int link_ret)
6643 {
6644 #define HCLGE_PHY_LINK_STATUS_NUM  200
6645 
6646 	struct phy_device *phydev = hdev->hw.mac.phydev;
6647 	int i = 0;
6648 	int ret;
6649 
6650 	do {
6651 		ret = phy_read_status(phydev);
6652 		if (ret) {
6653 			dev_err(&hdev->pdev->dev,
6654 				"phy update link status fail, ret = %d\n", ret);
6655 			return;
6656 		}
6657 
6658 		if (phydev->link == link_ret)
6659 			break;
6660 
6661 		msleep(HCLGE_LINK_STATUS_MS);
6662 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6663 }
6664 
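/* Poll the MAC link status every HCLGE_LINK_STATUS_MS until it matches
 * link_ret or wait_cnt polls have elapsed; returns -EBUSY on timeout.
 */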
6665 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
6666 				      int wait_cnt)
6667 {
6668 	int link_status;
6669 	int i = 0;
6670 	int ret;
6671 
6672 	do {
6673 		ret = hclge_get_mac_link_status(hdev, &link_status);
6674 		if (ret)
6675 			return ret;
6676 		if (link_status == link_ret)
6677 			return 0;
6678 
6679 		msleep(HCLGE_LINK_STATUS_MS);
6680 	} while (++i < wait_cnt);
6681 	return -EBUSY;
6682 }
6683 
6684 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6685 					  bool is_phy)
6686 {
6687 #define HCLGE_MAC_LINK_STATUS_NUM  100
6688 
6689 	int link_ret;
6690 
6691 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6692 
6693 	if (is_phy)
6694 		hclge_phy_link_status_wait(hdev, link_ret);
6695 
6696 	return hclge_mac_link_status_wait(hdev, link_ret,
6697 					  HCLGE_MAC_LINK_STATUS_NUM);
6698 }
6699 
6700 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6701 {
6702 	struct hclge_config_mac_mode_cmd *req;
6703 	struct hclge_desc desc;
6704 	u32 loop_en;
6705 	int ret;
6706 
6707 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6708 	/* 1 Read out the MAC mode config at first */
6709 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6710 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6711 	if (ret) {
6712 		dev_err(&hdev->pdev->dev,
6713 			"mac loopback get fail, ret =%d.\n", ret);
6714 		return ret;
6715 	}
6716 
6717 	/* 2 Then setup the loopback flag */
6718 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6719 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6720 
6721 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6722 
6723 	/* 3 Config mac work mode with loopback flag
6724 	 * and its original configuration parameters
6725 	 */
6726 	hclge_cmd_reuse_desc(&desc, false);
6727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6728 	if (ret)
6729 		dev_err(&hdev->pdev->dev,
6730 			"mac loopback set fail, ret =%d.\n", ret);
6731 	return ret;
6732 }
6733 
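/* Configure serdes loopback for the selected mode, then poll the command
 * result until the firmware reports completion; returns -EBUSY on timeout
 * and -EIO if the firmware reports failure.
 */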
6734 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6735 				     enum hnae3_loop loop_mode)
6736 {
6737 #define HCLGE_SERDES_RETRY_MS	10
6738 #define HCLGE_SERDES_RETRY_NUM	100
6739 
6740 	struct hclge_serdes_lb_cmd *req;
6741 	struct hclge_desc desc;
6742 	int ret, i = 0;
6743 	u8 loop_mode_b;
6744 
6745 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6746 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6747 
6748 	switch (loop_mode) {
6749 	case HNAE3_LOOP_SERIAL_SERDES:
6750 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6751 		break;
6752 	case HNAE3_LOOP_PARALLEL_SERDES:
6753 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6754 		break;
6755 	default:
6756 		dev_err(&hdev->pdev->dev,
6757 			"unsupported serdes loopback mode %d\n", loop_mode);
6758 		return -ENOTSUPP;
6759 	}
6760 
6761 	if (en) {
6762 		req->enable = loop_mode_b;
6763 		req->mask = loop_mode_b;
6764 	} else {
6765 		req->mask = loop_mode_b;
6766 	}
6767 
6768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6769 	if (ret) {
6770 		dev_err(&hdev->pdev->dev,
6771 			"serdes loopback set fail, ret = %d\n", ret);
6772 		return ret;
6773 	}
6774 
6775 	do {
6776 		msleep(HCLGE_SERDES_RETRY_MS);
6777 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6778 					   true);
6779 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6780 		if (ret) {
6781 			dev_err(&hdev->pdev->dev,
6782 				"serdes loopback get, ret = %d\n", ret);
6783 			return ret;
6784 		}
6785 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6786 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6787 
6788 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6789 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6790 		return -EBUSY;
6791 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6792 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6793 		return -EIO;
6794 	}
6795 	return ret;
6796 }
6797 
6798 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6799 				     enum hnae3_loop loop_mode)
6800 {
6801 	int ret;
6802 
6803 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6804 	if (ret)
6805 		return ret;
6806 
6807 	hclge_cfg_mac_mode(hdev, en);
6808 
6809 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6810 	if (ret)
6811 		dev_err(&hdev->pdev->dev,
6812 			"serdes loopback config mac mode timeout\n");
6813 
6814 	return ret;
6815 }
6816 
6817 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6818 				     struct phy_device *phydev)
6819 {
6820 	int ret;
6821 
6822 	if (!phydev->suspended) {
6823 		ret = phy_suspend(phydev);
6824 		if (ret)
6825 			return ret;
6826 	}
6827 
6828 	ret = phy_resume(phydev);
6829 	if (ret)
6830 		return ret;
6831 
6832 	return phy_loopback(phydev, true);
6833 }
6834 
6835 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6836 				      struct phy_device *phydev)
6837 {
6838 	int ret;
6839 
6840 	ret = phy_loopback(phydev, false);
6841 	if (ret)
6842 		return ret;
6843 
6844 	return phy_suspend(phydev);
6845 }
6846 
6847 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6848 {
6849 	struct phy_device *phydev = hdev->hw.mac.phydev;
6850 	int ret;
6851 
6852 	if (!phydev)
6853 		return -ENOTSUPP;
6854 
6855 	if (en)
6856 		ret = hclge_enable_phy_loopback(hdev, phydev);
6857 	else
6858 		ret = hclge_disable_phy_loopback(hdev, phydev);
6859 	if (ret) {
6860 		dev_err(&hdev->pdev->dev,
6861 			"set phy loopback fail, ret = %d\n", ret);
6862 		return ret;
6863 	}
6864 
6865 	hclge_cfg_mac_mode(hdev, en);
6866 
6867 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6868 	if (ret)
6869 		dev_err(&hdev->pdev->dev,
6870 			"phy loopback config mac mode timeout\n");
6871 
6872 	return ret;
6873 }
6874 
6875 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6876 			    int stream_id, bool enable)
6877 {
6878 	struct hclge_desc desc;
6879 	struct hclge_cfg_com_tqp_queue_cmd *req =
6880 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6881 	int ret;
6882 
6883 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6884 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6885 	req->stream_id = cpu_to_le16(stream_id);
6886 	if (enable)
6887 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6888 
6889 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6890 	if (ret)
6891 		dev_err(&hdev->pdev->dev,
6892 			"Tqp enable fail, status =%d.\n", ret);
6893 	return ret;
6894 }
6895 
6896 static int hclge_set_loopback(struct hnae3_handle *handle,
6897 			      enum hnae3_loop loop_mode, bool en)
6898 {
6899 	struct hclge_vport *vport = hclge_get_vport(handle);
6900 	struct hnae3_knic_private_info *kinfo;
6901 	struct hclge_dev *hdev = vport->back;
6902 	int i, ret;
6903 
6904 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6905 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6906 	 * the same, the packets are looped back in the SSU. If SSU loopback
6907 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6908 	 */
6909 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6910 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6911 
6912 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6913 						HCLGE_SWITCH_ALW_LPBK_MASK);
6914 		if (ret)
6915 			return ret;
6916 	}
6917 
6918 	switch (loop_mode) {
6919 	case HNAE3_LOOP_APP:
6920 		ret = hclge_set_app_loopback(hdev, en);
6921 		break;
6922 	case HNAE3_LOOP_SERIAL_SERDES:
6923 	case HNAE3_LOOP_PARALLEL_SERDES:
6924 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6925 		break;
6926 	case HNAE3_LOOP_PHY:
6927 		ret = hclge_set_phy_loopback(hdev, en);
6928 		break;
6929 	default:
6930 		ret = -ENOTSUPP;
6931 		dev_err(&hdev->pdev->dev,
6932 			"loop_mode %d is not supported\n", loop_mode);
6933 		break;
6934 	}
6935 
6936 	if (ret)
6937 		return ret;
6938 
6939 	kinfo = &vport->nic.kinfo;
6940 	for (i = 0; i < kinfo->num_tqps; i++) {
6941 		ret = hclge_tqp_enable(hdev, i, 0, en);
6942 		if (ret)
6943 			return ret;
6944 	}
6945 
6946 	return 0;
6947 }
6948 
6949 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6950 {
6951 	int ret;
6952 
6953 	ret = hclge_set_app_loopback(hdev, false);
6954 	if (ret)
6955 		return ret;
6956 
6957 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6958 	if (ret)
6959 		return ret;
6960 
6961 	return hclge_cfg_serdes_loopback(hdev, false,
6962 					 HNAE3_LOOP_PARALLEL_SERDES);
6963 }
6964 
6965 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6966 {
6967 	struct hclge_vport *vport = hclge_get_vport(handle);
6968 	struct hnae3_knic_private_info *kinfo;
6969 	struct hnae3_queue *queue;
6970 	struct hclge_tqp *tqp;
6971 	int i;
6972 
6973 	kinfo = &vport->nic.kinfo;
6974 	for (i = 0; i < kinfo->num_tqps; i++) {
6975 		queue = handle->kinfo.tqp[i];
6976 		tqp = container_of(queue, struct hclge_tqp, q);
6977 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6978 	}
6979 }
6980 
6981 static void hclge_flush_link_update(struct hclge_dev *hdev)
6982 {
6983 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6984 
6985 	unsigned long last = hdev->serv_processed_cnt;
6986 	int i = 0;
6987 
6988 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6989 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6990 	       last == hdev->serv_processed_cnt)
6991 		usleep_range(1, 1);
6992 }
6993 
6994 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6995 {
6996 	struct hclge_vport *vport = hclge_get_vport(handle);
6997 	struct hclge_dev *hdev = vport->back;
6998 
6999 	if (enable) {
7000 		hclge_task_schedule(hdev, 0);
7001 	} else {
7002 		/* Set the DOWN flag here to disable link updating */
7003 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7004 
7005 		/* flush memory to make sure DOWN is seen by service task */
7006 		smp_mb__before_atomic();
7007 		hclge_flush_link_update(hdev);
7008 	}
7009 }
7010 
7011 static int hclge_ae_start(struct hnae3_handle *handle)
7012 {
7013 	struct hclge_vport *vport = hclge_get_vport(handle);
7014 	struct hclge_dev *hdev = vport->back;
7015 
7016 	/* mac enable */
7017 	hclge_cfg_mac_mode(hdev, true);
7018 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7019 	hdev->hw.mac.link = 0;
7020 
7021 	/* reset tqp stats */
7022 	hclge_reset_tqp_stats(handle);
7023 
7024 	hclge_mac_start_phy(hdev);
7025 
7026 	return 0;
7027 }
7028 
7029 static void hclge_ae_stop(struct hnae3_handle *handle)
7030 {
7031 	struct hclge_vport *vport = hclge_get_vport(handle);
7032 	struct hclge_dev *hdev = vport->back;
7033 	int i;
7034 
7035 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7036 	spin_lock_bh(&hdev->fd_rule_lock);
7037 	hclge_clear_arfs_rules(handle);
7038 	spin_unlock_bh(&hdev->fd_rule_lock);
7039 
7040 	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
7041 	 * so it only needs to stop the phy here.
7042 	 */
7043 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
7044 		hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
7045 				       HCLGE_PFC_DISABLE);
7046 		if (hdev->reset_type != HNAE3_FUNC_RESET &&
7047 		    hdev->reset_type != HNAE3_FLR_RESET) {
7048 			hclge_mac_stop_phy(hdev);
7049 			hclge_update_link_status(hdev);
7050 			return;
7051 		}
7052 	}
7053 
7054 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7055 		hclge_reset_tqp(handle, i);
7056 
7057 	hclge_config_mac_tnl_int(hdev, false);
7058 
7059 	/* Mac disable */
7060 	hclge_cfg_mac_mode(hdev, false);
7061 
7062 	hclge_mac_stop_phy(hdev);
7063 
7064 	/* reset tqp stats */
7065 	hclge_reset_tqp_stats(handle);
7066 	hclge_update_link_status(hdev);
7067 }
7068 
7069 int hclge_vport_start(struct hclge_vport *vport)
7070 {
7071 	struct hclge_dev *hdev = vport->back;
7072 
7073 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7074 	vport->last_active_jiffies = jiffies;
7075 
7076 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7077 		if (vport->vport_id) {
7078 			hclge_restore_mac_table_common(vport);
7079 			hclge_restore_vport_vlan_table(vport);
7080 		} else {
7081 			hclge_restore_hw_table(hdev);
7082 		}
7083 	}
7084 
7085 	clear_bit(vport->vport_id, hdev->vport_config_block);
7086 
7087 	return 0;
7088 }
7089 
7090 void hclge_vport_stop(struct hclge_vport *vport)
7091 {
7092 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7093 }
7094 
7095 static int hclge_client_start(struct hnae3_handle *handle)
7096 {
7097 	struct hclge_vport *vport = hclge_get_vport(handle);
7098 
7099 	return hclge_vport_start(vport);
7100 }
7101 
7102 static void hclge_client_stop(struct hnae3_handle *handle)
7103 {
7104 	struct hclge_vport *vport = hclge_get_vport(handle);
7105 
7106 	hclge_vport_stop(vport);
7107 }
7108 
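/* Translate the response of a MAC VLAN table command (add, remove or
 * lookup) into an errno.
 */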
7109 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7110 					 u16 cmdq_resp, u8  resp_code,
7111 					 enum hclge_mac_vlan_tbl_opcode op)
7112 {
7113 	struct hclge_dev *hdev = vport->back;
7114 
7115 	if (cmdq_resp) {
7116 		dev_err(&hdev->pdev->dev,
7117 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7118 			cmdq_resp);
7119 		return -EIO;
7120 	}
7121 
7122 	if (op == HCLGE_MAC_VLAN_ADD) {
7123 		if (!resp_code || resp_code == 1)
7124 			return 0;
7125 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7126 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7127 			return -ENOSPC;
7128 
7129 		dev_err(&hdev->pdev->dev,
7130 			"add mac addr failed for undefined, code=%u.\n",
7131 			resp_code);
7132 		return -EIO;
7133 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7134 		if (!resp_code) {
7135 			return 0;
7136 		} else if (resp_code == 1) {
7137 			dev_dbg(&hdev->pdev->dev,
7138 				"remove mac addr failed for miss.\n");
7139 			return -ENOENT;
7140 		}
7141 
7142 		dev_err(&hdev->pdev->dev,
7143 			"remove mac addr failed for undefined, code=%u.\n",
7144 			resp_code);
7145 		return -EIO;
7146 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7147 		if (!resp_code) {
7148 			return 0;
7149 		} else if (resp_code == 1) {
7150 			dev_dbg(&hdev->pdev->dev,
7151 				"lookup mac addr failed for miss.\n");
7152 			return -ENOENT;
7153 		}
7154 
7155 		dev_err(&hdev->pdev->dev,
7156 			"lookup mac addr failed for undefined, code=%u.\n",
7157 			resp_code);
7158 		return -EIO;
7159 	}
7160 
7161 	dev_err(&hdev->pdev->dev,
7162 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7163 
7164 	return -EINVAL;
7165 }
7166 
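/* Set or clear the bit of the given vfid in the function bitmap of a
 * multicast MAC VLAN table entry: the first 192 function ids live in
 * desc[1], the remaining ones in desc[2].
 */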
7167 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7168 {
7169 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7170 
7171 	unsigned int word_num;
7172 	unsigned int bit_num;
7173 
7174 	if (vfid > 255 || vfid < 0)
7175 		return -EIO;
7176 
7177 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7178 		word_num = vfid / 32;
7179 		bit_num  = vfid % 32;
7180 		if (clr)
7181 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7182 		else
7183 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7184 	} else {
7185 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7186 		bit_num  = vfid % 32;
7187 		if (clr)
7188 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7189 		else
7190 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7191 	}
7192 
7193 	return 0;
7194 }
7195 
7196 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7197 {
7198 #define HCLGE_DESC_NUMBER 3
7199 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7200 	int i, j;
7201 
7202 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7203 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7204 			if (desc[i].data[j])
7205 				return false;
7206 
7207 	return true;
7208 }
7209 
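/* Pack a MAC address into a MAC VLAN table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; multicast entries get
 * the multicast entry type bits set as well.
 */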
7210 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7211 				   const u8 *addr, bool is_mc)
7212 {
7213 	const unsigned char *mac_addr = addr;
7214 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7215 		       (mac_addr[0]) | (mac_addr[1] << 8);
7216 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7217 
7218 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7219 	if (is_mc) {
7220 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7221 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7222 	}
7223 
7224 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7225 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7226 }
7227 
7228 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7229 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7230 {
7231 	struct hclge_dev *hdev = vport->back;
7232 	struct hclge_desc desc;
7233 	u8 resp_code;
7234 	u16 retval;
7235 	int ret;
7236 
7237 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7238 
7239 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7240 
7241 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7242 	if (ret) {
7243 		dev_err(&hdev->pdev->dev,
7244 			"del mac addr failed for cmd_send, ret =%d.\n",
7245 			ret);
7246 		return ret;
7247 	}
7248 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7249 	retval = le16_to_cpu(desc.retval);
7250 
7251 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7252 					     HCLGE_MAC_VLAN_REMOVE);
7253 }
7254 
7255 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7256 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7257 				     struct hclge_desc *desc,
7258 				     bool is_mc)
7259 {
7260 	struct hclge_dev *hdev = vport->back;
7261 	u8 resp_code;
7262 	u16 retval;
7263 	int ret;
7264 
7265 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7266 	if (is_mc) {
7267 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7268 		memcpy(desc[0].data,
7269 		       req,
7270 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7271 		hclge_cmd_setup_basic_desc(&desc[1],
7272 					   HCLGE_OPC_MAC_VLAN_ADD,
7273 					   true);
7274 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7275 		hclge_cmd_setup_basic_desc(&desc[2],
7276 					   HCLGE_OPC_MAC_VLAN_ADD,
7277 					   true);
7278 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7279 	} else {
7280 		memcpy(desc[0].data,
7281 		       req,
7282 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7283 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7284 	}
7285 	if (ret) {
7286 		dev_err(&hdev->pdev->dev,
7287 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7288 			ret);
7289 		return ret;
7290 	}
7291 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7292 	retval = le16_to_cpu(desc[0].retval);
7293 
7294 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7295 					     HCLGE_MAC_VLAN_LKUP);
7296 }
7297 
7298 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7299 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7300 				  struct hclge_desc *mc_desc)
7301 {
7302 	struct hclge_dev *hdev = vport->back;
7303 	int cfg_status;
7304 	u8 resp_code;
7305 	u16 retval;
7306 	int ret;
7307 
7308 	if (!mc_desc) {
7309 		struct hclge_desc desc;
7310 
7311 		hclge_cmd_setup_basic_desc(&desc,
7312 					   HCLGE_OPC_MAC_VLAN_ADD,
7313 					   false);
7314 		memcpy(desc.data, req,
7315 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7316 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7317 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7318 		retval = le16_to_cpu(desc.retval);
7319 
7320 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7321 							   resp_code,
7322 							   HCLGE_MAC_VLAN_ADD);
7323 	} else {
7324 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7325 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7326 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7327 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7328 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7329 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7330 		memcpy(mc_desc[0].data, req,
7331 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7332 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7333 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7334 		retval = le16_to_cpu(mc_desc[0].retval);
7335 
7336 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7337 							   resp_code,
7338 							   HCLGE_MAC_VLAN_ADD);
7339 	}
7340 
7341 	if (ret) {
7342 		dev_err(&hdev->pdev->dev,
7343 			"add mac addr failed for cmd_send, ret =%d.\n",
7344 			ret);
7345 		return ret;
7346 	}
7347 
7348 	return cfg_status;
7349 }
7350 
7351 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7352 			       u16 *allocated_size)
7353 {
7354 	struct hclge_umv_spc_alc_cmd *req;
7355 	struct hclge_desc desc;
7356 	int ret;
7357 
7358 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7359 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7360 
7361 	req->space_size = cpu_to_le32(space_size);
7362 
7363 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7364 	if (ret) {
7365 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7366 			ret);
7367 		return ret;
7368 	}
7369 
7370 	*allocated_size = le32_to_cpu(desc.data[1]);
7371 
7372 	return 0;
7373 }
7374 
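/* Request the wanted unicast MAC VLAN (UMV) table space from firmware and
 * divide the allocated space between per-function private quotas and a
 * shared pool.
 */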
7375 static int hclge_init_umv_space(struct hclge_dev *hdev)
7376 {
7377 	u16 allocated_size = 0;
7378 	int ret;
7379 
7380 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7381 	if (ret)
7382 		return ret;
7383 
7384 	if (allocated_size < hdev->wanted_umv_size)
7385 		dev_warn(&hdev->pdev->dev,
7386 			 "failed to alloc umv space, want %u, get %u\n",
7387 			 hdev->wanted_umv_size, allocated_size);
7388 
7389 	hdev->max_umv_size = allocated_size;
7390 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7391 	hdev->share_umv_size = hdev->priv_umv_size +
7392 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7393 
7394 	return 0;
7395 }
7396 
7397 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7398 {
7399 	struct hclge_vport *vport;
7400 	int i;
7401 
7402 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7403 		vport = &hdev->vport[i];
7404 		vport->used_umv_num = 0;
7405 	}
7406 
7407 	mutex_lock(&hdev->vport_lock);
7408 	hdev->share_umv_size = hdev->priv_umv_size +
7409 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7410 	mutex_unlock(&hdev->vport_lock);
7411 }
7412 
7413 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7414 {
7415 	struct hclge_dev *hdev = vport->back;
7416 	bool is_full;
7417 
7418 	if (need_lock)
7419 		mutex_lock(&hdev->vport_lock);
7420 
7421 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7422 		   hdev->share_umv_size == 0);
7423 
7424 	if (need_lock)
7425 		mutex_unlock(&hdev->vport_lock);
7426 
7427 	return is_full;
7428 }
7429 
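/* Update the UMV space accounting when a unicast entry is added or freed:
 * entries beyond the vport's private quota are charged to the shared pool.
 */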
7430 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7431 {
7432 	struct hclge_dev *hdev = vport->back;
7433 
7434 	if (is_free) {
7435 		if (vport->used_umv_num > hdev->priv_umv_size)
7436 			hdev->share_umv_size++;
7437 
7438 		if (vport->used_umv_num > 0)
7439 			vport->used_umv_num--;
7440 	} else {
7441 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7442 		    hdev->share_umv_size > 0)
7443 			hdev->share_umv_size--;
7444 		vport->used_umv_num++;
7445 	}
7446 }
7447 
7448 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7449 						  const u8 *mac_addr)
7450 {
7451 	struct hclge_mac_node *mac_node, *tmp;
7452 
7453 	list_for_each_entry_safe(mac_node, tmp, list, node)
7454 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7455 			return mac_node;
7456 
7457 	return NULL;
7458 }
7459 
7460 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7461 				  enum HCLGE_MAC_NODE_STATE state)
7462 {
7463 	switch (state) {
7464 	/* from set_rx_mode or tmp_add_list */
7465 	case HCLGE_MAC_TO_ADD:
7466 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7467 			mac_node->state = HCLGE_MAC_ACTIVE;
7468 		break;
7469 	/* only from set_rx_mode */
7470 	case HCLGE_MAC_TO_DEL:
7471 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7472 			list_del(&mac_node->node);
7473 			kfree(mac_node);
7474 		} else {
7475 			mac_node->state = HCLGE_MAC_TO_DEL;
7476 		}
7477 		break;
7478 	/* only from tmp_add_list, the mac_node->state won't be
7479 	 * ACTIVE.
7480 	 */
7481 	case HCLGE_MAC_ACTIVE:
7482 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7483 			mac_node->state = HCLGE_MAC_ACTIVE;
7484 
7485 		break;
7486 	}
7487 }
7488 
7489 int hclge_update_mac_list(struct hclge_vport *vport,
7490 			  enum HCLGE_MAC_NODE_STATE state,
7491 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7492 			  const unsigned char *addr)
7493 {
7494 	struct hclge_dev *hdev = vport->back;
7495 	struct hclge_mac_node *mac_node;
7496 	struct list_head *list;
7497 
7498 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7499 		&vport->uc_mac_list : &vport->mc_mac_list;
7500 
7501 	spin_lock_bh(&vport->mac_list_lock);
7502 
7503 	/* if the mac addr is already in the mac list, no need to add a new
7504 	 * one into it; just check the mac addr state and convert it to a
7505 	 * new state, remove it, or do nothing.
7506 	 */
7507 	mac_node = hclge_find_mac_node(list, addr);
7508 	if (mac_node) {
7509 		hclge_update_mac_node(mac_node, state);
7510 		spin_unlock_bh(&vport->mac_list_lock);
7511 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7512 		return 0;
7513 	}
7514 
7515 	/* if this address has never been added, there is nothing to delete */
7516 	if (state == HCLGE_MAC_TO_DEL) {
7517 		spin_unlock_bh(&vport->mac_list_lock);
7518 		dev_err(&hdev->pdev->dev,
7519 			"failed to delete address %pM from mac list\n",
7520 			addr);
7521 		return -ENOENT;
7522 	}
7523 
7524 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7525 	if (!mac_node) {
7526 		spin_unlock_bh(&vport->mac_list_lock);
7527 		return -ENOMEM;
7528 	}
7529 
7530 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7531 
7532 	mac_node->state = state;
7533 	ether_addr_copy(mac_node->mac_addr, addr);
7534 	list_add_tail(&mac_node->node, list);
7535 
7536 	spin_unlock_bh(&vport->mac_list_lock);
7537 
7538 	return 0;
7539 }
7540 
7541 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7542 			     const unsigned char *addr)
7543 {
7544 	struct hclge_vport *vport = hclge_get_vport(handle);
7545 
7546 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7547 				     addr);
7548 }
7549 
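/* Add a unicast MAC address for the vport to the hardware table: the
 * address is looked up first and only added when it is absent and UMV
 * space is available; returns -EEXIST for a duplicate and -ENOSPC when
 * the table is full.
 */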
7550 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7551 			     const unsigned char *addr)
7552 {
7553 	struct hclge_dev *hdev = vport->back;
7554 	struct hclge_mac_vlan_tbl_entry_cmd req;
7555 	struct hclge_desc desc;
7556 	u16 egress_port = 0;
7557 	int ret;
7558 
7559 	/* mac addr check */
7560 	if (is_zero_ether_addr(addr) ||
7561 	    is_broadcast_ether_addr(addr) ||
7562 	    is_multicast_ether_addr(addr)) {
7563 		dev_err(&hdev->pdev->dev,
7564 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7565 			 addr, is_zero_ether_addr(addr),
7566 			 is_broadcast_ether_addr(addr),
7567 			 is_multicast_ether_addr(addr));
7568 		return -EINVAL;
7569 	}
7570 
7571 	memset(&req, 0, sizeof(req));
7572 
7573 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7574 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7575 
7576 	req.egress_port = cpu_to_le16(egress_port);
7577 
7578 	hclge_prepare_mac_addr(&req, addr, false);
7579 
7580 	/* Look up the mac address in the mac_vlan table, and add
7581 	 * it if the entry does not exist. Duplicate unicast entries
7582 	 * are not allowed in the mac vlan table.
7583 	 */
7584 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7585 	if (ret == -ENOENT) {
7586 		mutex_lock(&hdev->vport_lock);
7587 		if (!hclge_is_umv_space_full(vport, false)) {
7588 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7589 			if (!ret)
7590 				hclge_update_umv_space(vport, false);
7591 			mutex_unlock(&hdev->vport_lock);
7592 			return ret;
7593 		}
7594 		mutex_unlock(&hdev->vport_lock);
7595 
7596 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7597 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7598 				hdev->priv_umv_size);
7599 
7600 		return -ENOSPC;
7601 	}
7602 
7603 	/* check if we just hit the duplicate */
7604 	if (!ret)
7605 		return -EEXIST;
7606 
7607 	return ret;
7608 }
7609 
7610 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7611 			    const unsigned char *addr)
7612 {
7613 	struct hclge_vport *vport = hclge_get_vport(handle);
7614 
7615 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7616 				     addr);
7617 }
7618 
7619 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7620 			    const unsigned char *addr)
7621 {
7622 	struct hclge_dev *hdev = vport->back;
7623 	struct hclge_mac_vlan_tbl_entry_cmd req;
7624 	int ret;
7625 
7626 	/* mac addr check */
7627 	if (is_zero_ether_addr(addr) ||
7628 	    is_broadcast_ether_addr(addr) ||
7629 	    is_multicast_ether_addr(addr)) {
7630 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7631 			addr);
7632 		return -EINVAL;
7633 	}
7634 
7635 	memset(&req, 0, sizeof(req));
7636 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7637 	hclge_prepare_mac_addr(&req, addr, false);
7638 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7639 	if (!ret || ret == -ENOENT) {
7640 		mutex_lock(&hdev->vport_lock);
7641 		hclge_update_umv_space(vport, true);
7642 		mutex_unlock(&hdev->vport_lock);
7643 		return 0;
7644 	}
7645 
7646 	return ret;
7647 }
7648 
7649 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7650 			     const unsigned char *addr)
7651 {
7652 	struct hclge_vport *vport = hclge_get_vport(handle);
7653 
7654 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7655 				     addr);
7656 }
7657 
7658 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7659 			     const unsigned char *addr)
7660 {
7661 	struct hclge_dev *hdev = vport->back;
7662 	struct hclge_mac_vlan_tbl_entry_cmd req;
7663 	struct hclge_desc desc[3];
7664 	int status;
7665 
7666 	/* mac addr check */
7667 	if (!is_multicast_ether_addr(addr)) {
7668 		dev_err(&hdev->pdev->dev,
7669 			"Add mc mac err! invalid mac:%pM.\n",
7670 			 addr);
7671 		return -EINVAL;
7672 	}
7673 	memset(&req, 0, sizeof(req));
7674 	hclge_prepare_mac_addr(&req, addr, true);
7675 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7676 	if (status) {
7677 		/* This mac addr does not exist, add a new entry for it */
7678 		memset(desc[0].data, 0, sizeof(desc[0].data));
7679 		memset(desc[1].data, 0, sizeof(desc[0].data));
7680 		memset(desc[2].data, 0, sizeof(desc[0].data));
7681 	}
7682 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7683 	if (status)
7684 		return status;
7685 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7686 
7687 	/* if already overflowed, do not print each time */
7688 	if (status == -ENOSPC &&
7689 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7690 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7691 
7692 	return status;
7693 }
7694 
7695 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7696 			    const unsigned char *addr)
7697 {
7698 	struct hclge_vport *vport = hclge_get_vport(handle);
7699 
7700 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7701 				     addr);
7702 }
7703 
7704 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7705 			    const unsigned char *addr)
7706 {
7707 	struct hclge_dev *hdev = vport->back;
7708 	struct hclge_mac_vlan_tbl_entry_cmd req;
7709 	enum hclge_cmd_status status;
7710 	struct hclge_desc desc[3];
7711 
7712 	/* mac addr check */
7713 	if (!is_multicast_ether_addr(addr)) {
7714 		dev_dbg(&hdev->pdev->dev,
7715 			"Remove mc mac err! invalid mac:%pM.\n",
7716 			 addr);
7717 		return -EINVAL;
7718 	}
7719 
7720 	memset(&req, 0, sizeof(req));
7721 	hclge_prepare_mac_addr(&req, addr, true);
7722 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7723 	if (!status) {
7724 		/* This mac addr exists, remove this handle's VFID from it */
7725 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7726 		if (status)
7727 			return status;
7728 
7729 		if (hclge_is_all_function_id_zero(desc))
7730 			/* All the vfids are zero, so this entry needs to be deleted */
7731 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7732 		else
7733 			/* Not all the vfids are zero, so only update the vfid bitmap */
7734 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7735 
7736 	} else if (status == -ENOENT) {
7737 		status = 0;
7738 	}
7739 
7740 	return status;
7741 }
7742 
7743 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7744 				      struct list_head *list,
7745 				      int (*sync)(struct hclge_vport *,
7746 						  const unsigned char *))
7747 {
7748 	struct hclge_mac_node *mac_node, *tmp;
7749 	int ret;
7750 
7751 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7752 		ret = sync(vport, mac_node->mac_addr);
7753 		if (!ret) {
7754 			mac_node->state = HCLGE_MAC_ACTIVE;
7755 		} else {
7756 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7757 				&vport->state);
7758 
7759 			/* If one unicast mac address already exists in hardware,
7760 			 * we still need to try whether the other unicast mac
7761 			 * addresses are new ones that can be added.
7762 			 */
7763 			if (ret != -EEXIST)
7764 				break;
7765 		}
7766 	}
7767 }
7768 
7769 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7770 					struct list_head *list,
7771 					int (*unsync)(struct hclge_vport *,
7772 						      const unsigned char *))
7773 {
7774 	struct hclge_mac_node *mac_node, *tmp;
7775 	int ret;
7776 
7777 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7778 		ret = unsync(vport, mac_node->mac_addr);
7779 		if (!ret || ret == -ENOENT) {
7780 			list_del(&mac_node->node);
7781 			kfree(mac_node);
7782 		} else {
7783 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7784 				&vport->state);
7785 			break;
7786 		}
7787 	}
7788 }
7789 
7790 static bool hclge_sync_from_add_list(struct list_head *add_list,
7791 				     struct list_head *mac_list)
7792 {
7793 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7794 	bool all_added = true;
7795 
7796 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7797 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7798 			all_added = false;
7799 
7800 		/* if the mac address from tmp_add_list is not in the
7801 		 * uc/mc_mac_list, it means a TO_DEL request was received while
7802 		 * the mac address was being added to the mac table. If the
7803 		 * mac_node state is ACTIVE, change it to TO_DEL so it will be
7804 		 * removed next time; otherwise it must be TO_ADD, meaning this
7805 		 * address has not been added to the mac table yet, so just
7806 		 * remove the mac node.
7807 		 */
7808 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7809 		if (new_node) {
7810 			hclge_update_mac_node(new_node, mac_node->state);
7811 			list_del(&mac_node->node);
7812 			kfree(mac_node);
7813 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7814 			mac_node->state = HCLGE_MAC_TO_DEL;
7815 			list_del(&mac_node->node);
7816 			list_add_tail(&mac_node->node, mac_list);
7817 		} else {
7818 			list_del(&mac_node->node);
7819 			kfree(mac_node);
7820 		}
7821 	}
7822 
7823 	return all_added;
7824 }
7825 
7826 static void hclge_sync_from_del_list(struct list_head *del_list,
7827 				     struct list_head *mac_list)
7828 {
7829 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7830 
7831 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7832 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7833 		if (new_node) {
7834 			/* If the mac addr exists in the mac list, it means a new
7835 			 * TO_ADD request was received during the time window of
7836 			 * configuring the mac address. Since the mac node state
7837 			 * is TO_ADD and the address is still in the hardware
7838 			 * (the delete failed), just change the mac node state
7839 			 * to ACTIVE.
7840 			 */
7841 			new_node->state = HCLGE_MAC_ACTIVE;
7842 			list_del(&mac_node->node);
7843 			kfree(mac_node);
7844 		} else {
7845 			list_del(&mac_node->node);
7846 			list_add_tail(&mac_node->node, mac_list);
7847 		}
7848 	}
7849 }
7850 
7851 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7852 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7853 					bool is_all_added)
7854 {
7855 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7856 		if (is_all_added)
7857 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7858 		else if (hclge_is_umv_space_full(vport, true))
7859 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7860 	} else {
7861 		if (is_all_added)
7862 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7863 		else
7864 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7865 	}
7866 }
7867 
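/* Move pending TO_ADD/TO_DEL entries to temporary lists under
 * mac_list_lock, apply them to hardware outside the lock (deletes first,
 * to free table space for the adds), then merge the results back and
 * update the overflow promiscuous flags.
 */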
7868 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7869 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7870 {
7871 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7872 	struct list_head tmp_add_list, tmp_del_list;
7873 	struct list_head *list;
7874 	bool all_added;
7875 
7876 	INIT_LIST_HEAD(&tmp_add_list);
7877 	INIT_LIST_HEAD(&tmp_del_list);
7878 
7879 	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
7880 	 * we can add/delete these mac addrs outside the spin lock
7881 	 */
7882 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7883 		&vport->uc_mac_list : &vport->mc_mac_list;
7884 
7885 	spin_lock_bh(&vport->mac_list_lock);
7886 
7887 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7888 		switch (mac_node->state) {
7889 		case HCLGE_MAC_TO_DEL:
7890 			list_del(&mac_node->node);
7891 			list_add_tail(&mac_node->node, &tmp_del_list);
7892 			break;
7893 		case HCLGE_MAC_TO_ADD:
7894 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7895 			if (!new_node)
7896 				goto stop_traverse;
7897 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7898 			new_node->state = mac_node->state;
7899 			list_add_tail(&new_node->node, &tmp_add_list);
7900 			break;
7901 		default:
7902 			break;
7903 		}
7904 	}
7905 
7906 stop_traverse:
7907 	spin_unlock_bh(&vport->mac_list_lock);
7908 
7909 	/* delete first, in order to get max mac table space for adding */
7910 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7911 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7912 					    hclge_rm_uc_addr_common);
7913 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7914 					  hclge_add_uc_addr_common);
7915 	} else {
7916 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7917 					    hclge_rm_mc_addr_common);
7918 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7919 					  hclge_add_mc_addr_common);
7920 	}
7921 
7922 	/* if some mac addresses failed to be added/deleted, move them back
7923 	 * to the mac_list and retry next time.
7924 	 */
7925 	spin_lock_bh(&vport->mac_list_lock);
7926 
7927 	hclge_sync_from_del_list(&tmp_del_list, list);
7928 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7929 
7930 	spin_unlock_bh(&vport->mac_list_lock);
7931 
7932 	hclge_update_overflow_flags(vport, mac_type, all_added);
7933 }
7934 
7935 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7936 {
7937 	struct hclge_dev *hdev = vport->back;
7938 
7939 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7940 		return false;
7941 
7942 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7943 		return true;
7944 
7945 	return false;
7946 }
7947 
7948 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7949 {
7950 	int i;
7951 
7952 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7953 		struct hclge_vport *vport = &hdev->vport[i];
7954 
7955 		if (!hclge_need_sync_mac_table(vport))
7956 			continue;
7957 
7958 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7959 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7960 	}
7961 }
7962 
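/* Remove all MAC addresses of this vport from hardware. When is_del_list
 * is false, active entries are kept in the software list in TO_ADD state
 * so that they can be restored after the VF reset has finished.
 */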
7963 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7964 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7965 {
7966 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7967 	struct hclge_mac_node *mac_cfg, *tmp;
7968 	struct hclge_dev *hdev = vport->back;
7969 	struct list_head tmp_del_list, *list;
7970 	int ret;
7971 
7972 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7973 		list = &vport->uc_mac_list;
7974 		unsync = hclge_rm_uc_addr_common;
7975 	} else {
7976 		list = &vport->mc_mac_list;
7977 		unsync = hclge_rm_mc_addr_common;
7978 	}
7979 
7980 	INIT_LIST_HEAD(&tmp_del_list);
7981 
7982 	if (!is_del_list)
7983 		set_bit(vport->vport_id, hdev->vport_config_block);
7984 
7985 	spin_lock_bh(&vport->mac_list_lock);
7986 
7987 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7988 		switch (mac_cfg->state) {
7989 		case HCLGE_MAC_TO_DEL:
7990 		case HCLGE_MAC_ACTIVE:
7991 			list_del(&mac_cfg->node);
7992 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7993 			break;
7994 		case HCLGE_MAC_TO_ADD:
7995 			if (is_del_list) {
7996 				list_del(&mac_cfg->node);
7997 				kfree(mac_cfg);
7998 			}
7999 			break;
8000 		}
8001 	}
8002 
8003 	spin_unlock_bh(&vport->mac_list_lock);
8004 
8005 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
8006 		ret = unsync(vport, mac_cfg->mac_addr);
8007 		if (!ret || ret == -ENOENT) {
8008 			/* clear all mac addrs from hardware, but keep them in
8009 			 * the mac list so they can be restored after the vf
8010 			 * reset has finished.
8011 			 */
8012 			if (!is_del_list &&
8013 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8014 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8015 			} else {
8016 				list_del(&mac_cfg->node);
8017 				kfree(mac_cfg);
8018 			}
8019 		} else if (is_del_list) {
8020 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8021 		}
8022 	}
8023 
8024 	spin_lock_bh(&vport->mac_list_lock);
8025 
8026 	hclge_sync_from_del_list(&tmp_del_list, list);
8027 
8028 	spin_unlock_bh(&vport->mac_list_lock);
8029 }
8030 
8031 /* remove all mac addresses when uninitializing */
8032 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8033 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8034 {
8035 	struct hclge_mac_node *mac_node, *tmp;
8036 	struct hclge_dev *hdev = vport->back;
8037 	struct list_head tmp_del_list, *list;
8038 
8039 	INIT_LIST_HEAD(&tmp_del_list);
8040 
8041 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8042 		&vport->uc_mac_list : &vport->mc_mac_list;
8043 
8044 	spin_lock_bh(&vport->mac_list_lock);
8045 
8046 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8047 		switch (mac_node->state) {
8048 		case HCLGE_MAC_TO_DEL:
8049 		case HCLGE_MAC_ACTIVE:
8050 			list_del(&mac_node->node);
8051 			list_add_tail(&mac_node->node, &tmp_del_list);
8052 			break;
8053 		case HCLGE_MAC_TO_ADD:
8054 			list_del(&mac_node->node);
8055 			kfree(mac_node);
8056 			break;
8057 		}
8058 	}
8059 
8060 	spin_unlock_bh(&vport->mac_list_lock);
8061 
8062 	if (mac_type == HCLGE_MAC_ADDR_UC)
8063 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8064 					    hclge_rm_uc_addr_common);
8065 	else
8066 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8067 					    hclge_rm_mc_addr_common);
8068 
8069 	if (!list_empty(&tmp_del_list))
8070 		dev_warn(&hdev->pdev->dev,
8071 			 "uninit %s mac list for vport %u not completely.\n",
8072 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8073 			 vport->vport_id);
8074 
8075 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8076 		list_del(&mac_node->node);
8077 		kfree(mac_node);
8078 	}
8079 }
8080 
8081 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8082 {
8083 	struct hclge_vport *vport;
8084 	int i;
8085 
8086 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8087 		vport = &hdev->vport[i];
8088 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8089 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8090 	}
8091 }
8092 
8093 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8094 					      u16 cmdq_resp, u8 resp_code)
8095 {
8096 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8097 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8098 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8099 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8100 
8101 	int return_status;
8102 
8103 	if (cmdq_resp) {
8104 		dev_err(&hdev->pdev->dev,
8105 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8106 			cmdq_resp);
8107 		return -EIO;
8108 	}
8109 
8110 	switch (resp_code) {
8111 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8112 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8113 		return_status = 0;
8114 		break;
8115 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8116 		dev_err(&hdev->pdev->dev,
8117 			"add mac ethertype failed for manager table overflow.\n");
8118 		return_status = -EIO;
8119 		break;
8120 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8121 		dev_err(&hdev->pdev->dev,
8122 			"add mac ethertype failed for key conflict.\n");
8123 		return_status = -EIO;
8124 		break;
8125 	default:
8126 		dev_err(&hdev->pdev->dev,
8127 			"add mac ethertype failed for undefined, code=%u.\n",
8128 			resp_code);
8129 		return_status = -EIO;
8130 	}
8131 
8132 	return return_status;
8133 }
8134 
8135 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8136 				     u8 *mac_addr)
8137 {
8138 	struct hclge_mac_vlan_tbl_entry_cmd req;
8139 	struct hclge_dev *hdev = vport->back;
8140 	struct hclge_desc desc;
8141 	u16 egress_port = 0;
8142 	int i;
8143 
8144 	if (is_zero_ether_addr(mac_addr))
8145 		return false;
8146 
8147 	memset(&req, 0, sizeof(req));
8148 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8149 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8150 	req.egress_port = cpu_to_le16(egress_port);
8151 	hclge_prepare_mac_addr(&req, mac_addr, false);
8152 
8153 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8154 		return true;
8155 
8156 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8157 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8158 		if (i != vf_idx &&
8159 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8160 			return true;
8161 
8162 	return false;
8163 }
8164 
8165 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8166 			    u8 *mac_addr)
8167 {
8168 	struct hclge_vport *vport = hclge_get_vport(handle);
8169 	struct hclge_dev *hdev = vport->back;
8170 
8171 	vport = hclge_get_vf_vport(hdev, vf);
8172 	if (!vport)
8173 		return -EINVAL;
8174 
8175 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8176 		dev_info(&hdev->pdev->dev,
8177 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8178 			 mac_addr);
8179 		return 0;
8180 	}
8181 
8182 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8183 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8184 			mac_addr);
8185 		return -EEXIST;
8186 	}
8187 
8188 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8189 
8190 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8191 		dev_info(&hdev->pdev->dev,
8192 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8193 			 vf, mac_addr);
8194 		return hclge_inform_reset_assert_to_vf(vport);
8195 	}
8196 
8197 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8198 		 vf, mac_addr);
8199 	return 0;
8200 }
8201 
8202 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8203 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8204 {
8205 	struct hclge_desc desc;
8206 	u8 resp_code;
8207 	u16 retval;
8208 	int ret;
8209 
8210 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8211 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8212 
8213 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8214 	if (ret) {
8215 		dev_err(&hdev->pdev->dev,
8216 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8217 			ret);
8218 		return ret;
8219 	}
8220 
8221 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8222 	retval = le16_to_cpu(desc.retval);
8223 
8224 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8225 }
8226 
8227 static int init_mgr_tbl(struct hclge_dev *hdev)
8228 {
8229 	int ret;
8230 	int i;
8231 
8232 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8233 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8234 		if (ret) {
8235 			dev_err(&hdev->pdev->dev,
8236 				"add mac ethertype failed, ret =%d.\n",
8237 				ret);
8238 			return ret;
8239 		}
8240 	}
8241 
8242 	return 0;
8243 }
8244 
8245 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8246 {
8247 	struct hclge_vport *vport = hclge_get_vport(handle);
8248 	struct hclge_dev *hdev = vport->back;
8249 
8250 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8251 }
8252 
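/* Update the unicast list when the device MAC address changes: add (or
 * reactivate) the new address at the head of the list and mark the old
 * address for deletion if it had already been added.
 */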
8253 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8254 				       const u8 *old_addr, const u8 *new_addr)
8255 {
8256 	struct list_head *list = &vport->uc_mac_list;
8257 	struct hclge_mac_node *old_node, *new_node;
8258 
8259 	new_node = hclge_find_mac_node(list, new_addr);
8260 	if (!new_node) {
8261 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8262 		if (!new_node)
8263 			return -ENOMEM;
8264 
8265 		new_node->state = HCLGE_MAC_TO_ADD;
8266 		ether_addr_copy(new_node->mac_addr, new_addr);
8267 		list_add(&new_node->node, list);
8268 	} else {
8269 		if (new_node->state == HCLGE_MAC_TO_DEL)
8270 			new_node->state = HCLGE_MAC_ACTIVE;
8271 
8272 		/* make sure the new addr is at the head of the list, so the
8273 		 * dev addr is not left out of the mac table due to the umv
8274 		 * space limitation after a global/imp reset, which clears the
8275 		 * mac table in hardware.
8276 		 */
8277 		list_move(&new_node->node, list);
8278 	}
8279 
8280 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8281 		old_node = hclge_find_mac_node(list, old_addr);
8282 		if (old_node) {
8283 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8284 				list_del(&old_node->node);
8285 				kfree(old_node);
8286 			} else {
8287 				old_node->state = HCLGE_MAC_TO_DEL;
8288 			}
8289 		}
8290 	}
8291 
8292 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8293 
8294 	return 0;
8295 }
8296 
8297 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8298 			      bool is_first)
8299 {
8300 	const unsigned char *new_addr = (const unsigned char *)p;
8301 	struct hclge_vport *vport = hclge_get_vport(handle);
8302 	struct hclge_dev *hdev = vport->back;
8303 	unsigned char *old_addr = NULL;
8304 	int ret;
8305 
8306 	/* mac addr check */
8307 	if (is_zero_ether_addr(new_addr) ||
8308 	    is_broadcast_ether_addr(new_addr) ||
8309 	    is_multicast_ether_addr(new_addr)) {
8310 		dev_err(&hdev->pdev->dev,
8311 			"change uc mac err! invalid mac: %pM.\n",
8312 			 new_addr);
8313 		return -EINVAL;
8314 	}
8315 
8316 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8317 	if (ret) {
8318 		dev_err(&hdev->pdev->dev,
8319 			"failed to configure mac pause address, ret = %d\n",
8320 			ret);
8321 		return ret;
8322 	}
8323 
8324 	if (!is_first)
8325 		old_addr = hdev->hw.mac.mac_addr;
8326 
8327 	spin_lock_bh(&vport->mac_list_lock);
8328 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8329 	if (ret) {
8330 		dev_err(&hdev->pdev->dev,
8331 			"failed to change the mac addr:%pM, ret = %d\n",
8332 			new_addr, ret);
8333 		spin_unlock_bh(&vport->mac_list_lock);
8334 
8335 		if (!is_first)
8336 			hclge_pause_addr_cfg(hdev, old_addr);
8337 
8338 		return ret;
8339 	}
8340 	/* We must update the dev addr under spinlock protection to prevent it
8341 	 * from being removed by the set_rx_mode path.
8342 	 */
8343 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8344 	spin_unlock_bh(&vport->mac_list_lock);
8345 
8346 	hclge_task_schedule(hdev, 0);
8347 
8348 	return 0;
8349 }
8350 
8351 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8352 			  int cmd)
8353 {
8354 	struct hclge_vport *vport = hclge_get_vport(handle);
8355 	struct hclge_dev *hdev = vport->back;
8356 
8357 	if (!hdev->hw.mac.phydev)
8358 		return -EOPNOTSUPP;
8359 
8360 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8361 }
8362 
8363 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8364 				      u8 fe_type, bool filter_en, u8 vf_id)
8365 {
8366 	struct hclge_vlan_filter_ctrl_cmd *req;
8367 	struct hclge_desc desc;
8368 	int ret;
8369 
8370 	/* read current vlan filter parameter */
8371 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8372 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8373 	req->vlan_type = vlan_type;
8374 	req->vf_id = vf_id;
8375 
8376 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8377 	if (ret) {
8378 		dev_err(&hdev->pdev->dev,
8379 			"failed to get vlan filter config, ret = %d.\n", ret);
8380 		return ret;
8381 	}
8382 
8383 	/* modify and write new config parameter */
8384 	hclge_cmd_reuse_desc(&desc, false);
8385 	req->vlan_fe = filter_en ?
8386 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8387 
8388 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8389 	if (ret)
8390 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8391 			ret);
8392 
8393 	return ret;
8394 }
8395 
8396 #define HCLGE_FILTER_TYPE_VF		0
8397 #define HCLGE_FILTER_TYPE_PORT		1
8398 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8399 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8400 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8401 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8402 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8403 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8404 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8405 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8406 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8407 
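/* Descriptive note (grounded in the code below): on device version V2 and
 * later, vlan filtering is enabled per VF on the egress side and per port on
 * the ingress side, with separate NIC and RoCE filter-enable bits; older (V1)
 * hardware only exposes a single egress enable bit.
 */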
8408 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8409 {
8410 	struct hclge_vport *vport = hclge_get_vport(handle);
8411 	struct hclge_dev *hdev = vport->back;
8412 
8413 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8414 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8415 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8416 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8417 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8418 	} else {
8419 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8420 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8421 					   0);
8422 	}
8423 	if (enable)
8424 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8425 	else
8426 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8427 }
8428 
8429 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8430 				    bool is_kill, u16 vlan,
8431 				    __be16 proto)
8432 {
8433 	struct hclge_vport *vport = &hdev->vport[vfid];
8434 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8435 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8436 	struct hclge_desc desc[2];
8437 	u8 vf_byte_val;
8438 	u8 vf_byte_off;
8439 	int ret;
8440 
8441 	/* If the vf vlan table is full, firmware will close the vf vlan filter,
8442 	 * so it is neither possible nor necessary to add a new vlan id to it.
8443 	 * If spoof check is enabled and the vf vlan table is full, adding a new
8444 	 * vlan must fail, because tx packets with that vlan id would be dropped.
8445 	 */
8446 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8447 		if (vport->vf_info.spoofchk && vlan) {
8448 			dev_err(&hdev->pdev->dev,
8449 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8450 			return -EPERM;
8451 		}
8452 		return 0;
8453 	}
8454 
8455 	hclge_cmd_setup_basic_desc(&desc[0],
8456 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8457 	hclge_cmd_setup_basic_desc(&desc[1],
8458 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8459 
8460 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8461 
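	/* The VF is selected by one bit in the command's vf_bitmap:
	 * e.g. vfid 10 -> vf_byte_off = 1, vf_byte_val = 0x04, i.e. bit 2
	 * of the second bitmap byte.
	 */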
8462 	vf_byte_off = vfid / 8;
8463 	vf_byte_val = 1 << (vfid % 8);
8464 
8465 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8466 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8467 
8468 	req0->vlan_id  = cpu_to_le16(vlan);
8469 	req0->vlan_cfg = is_kill;
8470 
8471 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8472 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8473 	else
8474 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8475 
8476 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8477 	if (ret) {
8478 		dev_err(&hdev->pdev->dev,
8479 			"Send vf vlan command fail, ret =%d.\n",
8480 			ret);
8481 		return ret;
8482 	}
8483 
8484 	if (!is_kill) {
8485 #define HCLGE_VF_VLAN_NO_ENTRY	2
8486 		if (!req0->resp_code || req0->resp_code == 1)
8487 			return 0;
8488 
8489 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8490 			set_bit(vfid, hdev->vf_vlan_full);
8491 			dev_warn(&hdev->pdev->dev,
8492 				 "vf vlan table is full, vf vlan filter is disabled\n");
8493 			return 0;
8494 		}
8495 
8496 		dev_err(&hdev->pdev->dev,
8497 			"Add vf vlan filter fail, ret =%u.\n",
8498 			req0->resp_code);
8499 	} else {
8500 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8501 		if (!req0->resp_code)
8502 			return 0;
8503 
8504 		/* The vf vlan filter is disabled when the vf vlan table is full,
8505 		 * so the new vlan id will not have been added to the table.
8506 		 * Just return 0 without a warning, to avoid massive verbose
8507 		 * logs on unload.
8508 		 */
8509 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8510 			return 0;
8511 
8512 		dev_err(&hdev->pdev->dev,
8513 			"Kill vf vlan filter fail, ret =%u.\n",
8514 			req0->resp_code);
8515 	}
8516 
8517 	return -EIO;
8518 }
8519 
8520 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8521 				      u16 vlan_id, bool is_kill)
8522 {
8523 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8524 	struct hclge_desc desc;
8525 	u8 vlan_offset_byte_val;
8526 	u8 vlan_offset_byte;
8527 	u8 vlan_offset_160;
8528 	int ret;
8529 
8530 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8531 
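	/* The vlan id selects one bit of the per-port vlan bitmap, split into
	 * chunks of HCLGE_VLAN_ID_OFFSET_STEP ids. Assuming a step of 160 and
	 * HCLGE_VLAN_BYTE_SIZE of 8, e.g. vlan_id 200 -> vlan_offset_160 = 1,
	 * vlan_offset_byte = 5, vlan_offset_byte_val = 0x01.
	 */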
8532 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8533 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8534 			   HCLGE_VLAN_BYTE_SIZE;
8535 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8536 
8537 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8538 	req->vlan_offset = vlan_offset_160;
8539 	req->vlan_cfg = is_kill;
8540 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8541 
8542 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8543 	if (ret)
8544 		dev_err(&hdev->pdev->dev,
8545 			"port vlan command, send fail, ret =%d.\n", ret);
8546 	return ret;
8547 }
8548 
8549 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8550 				    u16 vport_id, u16 vlan_id,
8551 				    bool is_kill)
8552 {
8553 	u16 vport_idx, vport_num = 0;
8554 	int ret;
8555 
8556 	if (is_kill && !vlan_id)
8557 		return 0;
8558 
8559 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8560 				       proto);
8561 	if (ret) {
8562 		dev_err(&hdev->pdev->dev,
8563 			"Set %u vport vlan filter config fail, ret =%d.\n",
8564 			vport_id, ret);
8565 		return ret;
8566 	}
8567 
8568 	/* vlan 0 may be added twice when 8021q module is enabled */
8569 	if (!is_kill && !vlan_id &&
8570 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8571 		return 0;
8572 
8573 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8574 		dev_err(&hdev->pdev->dev,
8575 			"Add port vlan failed, vport %u is already in vlan %u\n",
8576 			vport_id, vlan_id);
8577 		return -EINVAL;
8578 	}
8579 
8580 	if (is_kill &&
8581 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8582 		dev_err(&hdev->pdev->dev,
8583 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8584 			vport_id, vlan_id);
8585 		return -EINVAL;
8586 	}
8587 
8588 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8589 		vport_num++;
8590 
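	/* Only touch the port-level vlan filter when the first vport joins
	 * this vlan (add) or the last vport leaves it (kill).
	 */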
8591 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8592 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8593 						 is_kill);
8594 
8595 	return ret;
8596 }
8597 
8598 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8599 {
8600 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8601 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8602 	struct hclge_dev *hdev = vport->back;
8603 	struct hclge_desc desc;
8604 	u16 bmap_index;
8605 	int status;
8606 
8607 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8608 
8609 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8610 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8611 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8612 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8613 		      vcfg->accept_tag1 ? 1 : 0);
8614 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8615 		      vcfg->accept_untag1 ? 1 : 0);
8616 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8617 		      vcfg->accept_tag2 ? 1 : 0);
8618 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8619 		      vcfg->accept_untag2 ? 1 : 0);
8620 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8621 		      vcfg->insert_tag1_en ? 1 : 0);
8622 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8623 		      vcfg->insert_tag2_en ? 1 : 0);
8624 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8625 
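	/* Select this vport's bit in the command's vf_bitmap; the exact layout
	 * depends on HCLGE_VF_NUM_PER_CMD and HCLGE_VF_NUM_PER_BYTE (e.g.,
	 * assuming 8 VFs per bitmap byte, vport 10 maps to bit 2 of byte 1).
	 */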
8626 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8627 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8628 			HCLGE_VF_NUM_PER_BYTE;
8629 	req->vf_bitmap[bmap_index] =
8630 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8631 
8632 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8633 	if (status)
8634 		dev_err(&hdev->pdev->dev,
8635 			"Send port txvlan cfg command fail, ret =%d\n",
8636 			status);
8637 
8638 	return status;
8639 }
8640 
8641 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8642 {
8643 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8644 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8645 	struct hclge_dev *hdev = vport->back;
8646 	struct hclge_desc desc;
8647 	u16 bmap_index;
8648 	int status;
8649 
8650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8651 
8652 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8653 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8654 		      vcfg->strip_tag1_en ? 1 : 0);
8655 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8656 		      vcfg->strip_tag2_en ? 1 : 0);
8657 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8658 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8659 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8660 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8661 
8662 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8663 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8664 			HCLGE_VF_NUM_PER_BYTE;
8665 	req->vf_bitmap[bmap_index] =
8666 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8667 
8668 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8669 	if (status)
8670 		dev_err(&hdev->pdev->dev,
8671 			"Send port rxvlan cfg command fail, ret =%d\n",
8672 			status);
8673 
8674 	return status;
8675 }
8676 
8677 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8678 				  u16 port_base_vlan_state,
8679 				  u16 vlan_tag)
8680 {
8681 	int ret;
8682 
8683 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8684 		vport->txvlan_cfg.accept_tag1 = true;
8685 		vport->txvlan_cfg.insert_tag1_en = false;
8686 		vport->txvlan_cfg.default_tag1 = 0;
8687 	} else {
8688 		vport->txvlan_cfg.accept_tag1 = false;
8689 		vport->txvlan_cfg.insert_tag1_en = true;
8690 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8691 	}
8692 
8693 	vport->txvlan_cfg.accept_untag1 = true;
8694 
8695 	/* accept_tag2 and accept_untag2 are not supported on
8696 	 * pdev revision 0x20; newer revisions support them, but
8697 	 * these two fields cannot be configured by the user.
8698 	 */
8699 	vport->txvlan_cfg.accept_tag2 = true;
8700 	vport->txvlan_cfg.accept_untag2 = true;
8701 	vport->txvlan_cfg.insert_tag2_en = false;
8702 	vport->txvlan_cfg.default_tag2 = 0;
8703 
8704 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8705 		vport->rxvlan_cfg.strip_tag1_en = false;
8706 		vport->rxvlan_cfg.strip_tag2_en =
8707 				vport->rxvlan_cfg.rx_vlan_offload_en;
8708 	} else {
8709 		vport->rxvlan_cfg.strip_tag1_en =
8710 				vport->rxvlan_cfg.rx_vlan_offload_en;
8711 		vport->rxvlan_cfg.strip_tag2_en = true;
8712 	}
8713 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8714 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8715 
8716 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8717 	if (ret)
8718 		return ret;
8719 
8720 	return hclge_set_vlan_rx_offload_cfg(vport);
8721 }
8722 
8723 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8724 {
8725 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8726 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8727 	struct hclge_desc desc;
8728 	int status;
8729 
8730 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8731 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8732 	rx_req->ot_fst_vlan_type =
8733 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8734 	rx_req->ot_sec_vlan_type =
8735 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8736 	rx_req->in_fst_vlan_type =
8737 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8738 	rx_req->in_sec_vlan_type =
8739 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8740 
8741 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8742 	if (status) {
8743 		dev_err(&hdev->pdev->dev,
8744 			"Send rxvlan protocol type command fail, ret =%d\n",
8745 			status);
8746 		return status;
8747 	}
8748 
8749 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8750 
8751 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8752 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8753 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8754 
8755 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8756 	if (status)
8757 		dev_err(&hdev->pdev->dev,
8758 			"Send txvlan protocol type command fail, ret =%d\n",
8759 			status);
8760 
8761 	return status;
8762 }
8763 
8764 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8765 {
8766 #define HCLGE_DEF_VLAN_TYPE		0x8100
8767 
8768 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8769 	struct hclge_vport *vport;
8770 	int ret;
8771 	int i;
8772 
8773 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8774 		/* for revision 0x21, vf vlan filter is per function */
8775 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8776 			vport = &hdev->vport[i];
8777 			ret = hclge_set_vlan_filter_ctrl(hdev,
8778 							 HCLGE_FILTER_TYPE_VF,
8779 							 HCLGE_FILTER_FE_EGRESS,
8780 							 true,
8781 							 vport->vport_id);
8782 			if (ret)
8783 				return ret;
8784 		}
8785 
8786 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8787 						 HCLGE_FILTER_FE_INGRESS, true,
8788 						 0);
8789 		if (ret)
8790 			return ret;
8791 	} else {
8792 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8793 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8794 						 true, 0);
8795 		if (ret)
8796 			return ret;
8797 	}
8798 
8799 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8800 
8801 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8802 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8803 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8804 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8805 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8806 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8807 
8808 	ret = hclge_set_vlan_protocol_type(hdev);
8809 	if (ret)
8810 		return ret;
8811 
8812 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8813 		u16 vlan_tag;
8814 
8815 		vport = &hdev->vport[i];
8816 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8817 
8818 		ret = hclge_vlan_offload_cfg(vport,
8819 					     vport->port_base_vlan_cfg.state,
8820 					     vlan_tag);
8821 		if (ret)
8822 			return ret;
8823 	}
8824 
8825 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8826 }
8827 
8828 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8829 				       bool writen_to_tbl)
8830 {
8831 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8832 
8833 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
8834 		if (vlan->vlan_id == vlan_id)
8835 			return;
8836 
8837 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8838 	if (!vlan)
8839 		return;
8840 
8841 	vlan->hd_tbl_status = writen_to_tbl;
8842 	vlan->vlan_id = vlan_id;
8843 
8844 	list_add_tail(&vlan->node, &vport->vlan_list);
8845 }
8846 
8847 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8848 {
8849 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8850 	struct hclge_dev *hdev = vport->back;
8851 	int ret;
8852 
8853 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8854 		if (!vlan->hd_tbl_status) {
8855 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8856 						       vport->vport_id,
8857 						       vlan->vlan_id, false);
8858 			if (ret) {
8859 				dev_err(&hdev->pdev->dev,
8860 					"restore vport vlan list failed, ret=%d\n",
8861 					ret);
8862 				return ret;
8863 			}
8864 		}
8865 		vlan->hd_tbl_status = true;
8866 	}
8867 
8868 	return 0;
8869 }
8870 
8871 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8872 				      bool is_write_tbl)
8873 {
8874 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8875 	struct hclge_dev *hdev = vport->back;
8876 
8877 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8878 		if (vlan->vlan_id == vlan_id) {
8879 			if (is_write_tbl && vlan->hd_tbl_status)
8880 				hclge_set_vlan_filter_hw(hdev,
8881 							 htons(ETH_P_8021Q),
8882 							 vport->vport_id,
8883 							 vlan_id,
8884 							 true);
8885 
8886 			list_del(&vlan->node);
8887 			kfree(vlan);
8888 			break;
8889 		}
8890 	}
8891 }
8892 
8893 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8894 {
8895 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8896 	struct hclge_dev *hdev = vport->back;
8897 
8898 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8899 		if (vlan->hd_tbl_status)
8900 			hclge_set_vlan_filter_hw(hdev,
8901 						 htons(ETH_P_8021Q),
8902 						 vport->vport_id,
8903 						 vlan->vlan_id,
8904 						 true);
8905 
8906 		vlan->hd_tbl_status = false;
8907 		if (is_del_list) {
8908 			list_del(&vlan->node);
8909 			kfree(vlan);
8910 		}
8911 	}
8912 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8913 }
8914 
8915 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8916 {
8917 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8918 	struct hclge_vport *vport;
8919 	int i;
8920 
8921 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8922 		vport = &hdev->vport[i];
8923 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8924 			list_del(&vlan->node);
8925 			kfree(vlan);
8926 		}
8927 	}
8928 }
8929 
8930 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8931 {
8932 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8933 	struct hclge_dev *hdev = vport->back;
8934 	u16 vlan_proto;
8935 	u16 vlan_id;
8936 	u16 state;
8937 	int ret;
8938 
8939 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8940 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8941 	state = vport->port_base_vlan_cfg.state;
8942 
8943 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8944 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8945 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8946 					 vport->vport_id, vlan_id,
8947 					 false);
8948 		return;
8949 	}
8950 
8951 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8952 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8953 					       vport->vport_id,
8954 					       vlan->vlan_id, false);
8955 		if (ret)
8956 			break;
8957 		vlan->hd_tbl_status = true;
8958 	}
8959 }
8960 
8961 /* For global reset and imp reset, hardware will clear the mac table,
8962  * so we change the mac address state from ACTIVE to TO_ADD so that they
8963  * can be restored in the service task after the reset completes. Furthermore,
8964  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
8965  * restored after reset, so just remove these mac nodes from mac_list.
8966  */
8967 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8968 {
8969 	struct hclge_mac_node *mac_node, *tmp;
8970 
8971 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8972 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8973 			mac_node->state = HCLGE_MAC_TO_ADD;
8974 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8975 			list_del(&mac_node->node);
8976 			kfree(mac_node);
8977 		}
8978 	}
8979 }
8980 
8981 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8982 {
8983 	spin_lock_bh(&vport->mac_list_lock);
8984 
8985 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8986 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8987 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8988 
8989 	spin_unlock_bh(&vport->mac_list_lock);
8990 }
8991 
8992 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8993 {
8994 	struct hclge_vport *vport = &hdev->vport[0];
8995 	struct hnae3_handle *handle = &vport->nic;
8996 
8997 	hclge_restore_mac_table_common(vport);
8998 	hclge_restore_vport_vlan_table(vport);
8999 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9000 
9001 	hclge_restore_fd_entries(handle);
9002 }
9003 
9004 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9005 {
9006 	struct hclge_vport *vport = hclge_get_vport(handle);
9007 
9008 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9009 		vport->rxvlan_cfg.strip_tag1_en = false;
9010 		vport->rxvlan_cfg.strip_tag2_en = enable;
9011 	} else {
9012 		vport->rxvlan_cfg.strip_tag1_en = enable;
9013 		vport->rxvlan_cfg.strip_tag2_en = true;
9014 	}
9015 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9016 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9017 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9018 
9019 	return hclge_set_vlan_rx_offload_cfg(vport);
9020 }
9021 
9022 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9023 					    u16 port_base_vlan_state,
9024 					    struct hclge_vlan_info *new_info,
9025 					    struct hclge_vlan_info *old_info)
9026 {
9027 	struct hclge_dev *hdev = vport->back;
9028 	int ret;
9029 
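	/* When enabling port based vlan: drop the per-vport vlans from hw
	 * (keeping the software list) and install only the new port vlan.
	 * When disabling it: remove the old port vlan from hw and restore
	 * the vport vlan list to hw.
	 */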
9030 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9031 		hclge_rm_vport_all_vlan_table(vport, false);
9032 		return hclge_set_vlan_filter_hw(hdev,
9033 						 htons(new_info->vlan_proto),
9034 						 vport->vport_id,
9035 						 new_info->vlan_tag,
9036 						 false);
9037 	}
9038 
9039 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9040 				       vport->vport_id, old_info->vlan_tag,
9041 				       true);
9042 	if (ret)
9043 		return ret;
9044 
9045 	return hclge_add_vport_all_vlan_table(vport);
9046 }
9047 
9048 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9049 				    struct hclge_vlan_info *vlan_info)
9050 {
9051 	struct hnae3_handle *nic = &vport->nic;
9052 	struct hclge_vlan_info *old_vlan_info;
9053 	struct hclge_dev *hdev = vport->back;
9054 	int ret;
9055 
9056 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9057 
9058 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9059 	if (ret)
9060 		return ret;
9061 
9062 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9063 		/* add new VLAN tag */
9064 		ret = hclge_set_vlan_filter_hw(hdev,
9065 					       htons(vlan_info->vlan_proto),
9066 					       vport->vport_id,
9067 					       vlan_info->vlan_tag,
9068 					       false);
9069 		if (ret)
9070 			return ret;
9071 
9072 		/* remove old VLAN tag */
9073 		ret = hclge_set_vlan_filter_hw(hdev,
9074 					       htons(old_vlan_info->vlan_proto),
9075 					       vport->vport_id,
9076 					       old_vlan_info->vlan_tag,
9077 					       true);
9078 		if (ret)
9079 			return ret;
9080 
9081 		goto update;
9082 	}
9083 
9084 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9085 					       old_vlan_info);
9086 	if (ret)
9087 		return ret;
9088 
9089 	/* update state only when disabling/enabling port based VLAN */
9090 	vport->port_base_vlan_cfg.state = state;
9091 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9092 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9093 	else
9094 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9095 
9096 update:
9097 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9098 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9099 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9100 
9101 	return 0;
9102 }
9103 
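/* Map the requested vlan against the current port based vlan state:
 * no vlan while disabled -> NOCHANGE; a vlan while disabled -> ENABLE;
 * vlan 0 while enabled -> DISABLE; the same vlan -> NOCHANGE;
 * otherwise -> MODIFY.
 */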
9104 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9105 					  enum hnae3_port_base_vlan_state state,
9106 					  u16 vlan)
9107 {
9108 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9109 		if (!vlan)
9110 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9111 		else
9112 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9113 	} else {
9114 		if (!vlan)
9115 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9116 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9117 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9118 		else
9119 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9120 	}
9121 }
9122 
9123 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9124 				    u16 vlan, u8 qos, __be16 proto)
9125 {
9126 	struct hclge_vport *vport = hclge_get_vport(handle);
9127 	struct hclge_dev *hdev = vport->back;
9128 	struct hclge_vlan_info vlan_info;
9129 	u16 state;
9130 	int ret;
9131 
9132 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9133 		return -EOPNOTSUPP;
9134 
9135 	vport = hclge_get_vf_vport(hdev, vfid);
9136 	if (!vport)
9137 		return -EINVAL;
9138 
9139 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9140 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9141 		return -EINVAL;
9142 	if (proto != htons(ETH_P_8021Q))
9143 		return -EPROTONOSUPPORT;
9144 
9145 	state = hclge_get_port_base_vlan_state(vport,
9146 					       vport->port_base_vlan_cfg.state,
9147 					       vlan);
9148 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9149 		return 0;
9150 
9151 	vlan_info.vlan_tag = vlan;
9152 	vlan_info.qos = qos;
9153 	vlan_info.vlan_proto = ntohs(proto);
9154 
9155 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9156 		return hclge_update_port_base_vlan_cfg(vport, state,
9157 						       &vlan_info);
9158 	} else {
9159 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9160 							vport->vport_id, state,
9161 							vlan, qos,
9162 							ntohs(proto));
9163 		return ret;
9164 	}
9165 }
9166 
9167 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9168 {
9169 	struct hclge_vlan_info *vlan_info;
9170 	struct hclge_vport *vport;
9171 	int ret;
9172 	int vf;
9173 
9174 	/* clear port base vlan for all vf */
9175 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9176 		vport = &hdev->vport[vf];
9177 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9178 
9179 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9180 					       vport->vport_id,
9181 					       vlan_info->vlan_tag, true);
9182 		if (ret)
9183 			dev_err(&hdev->pdev->dev,
9184 				"failed to clear vf vlan for vf%d, ret = %d\n",
9185 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9186 	}
9187 }
9188 
9189 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9190 			  u16 vlan_id, bool is_kill)
9191 {
9192 	struct hclge_vport *vport = hclge_get_vport(handle);
9193 	struct hclge_dev *hdev = vport->back;
9194 	bool writen_to_tbl = false;
9195 	int ret = 0;
9196 
9197 	/* When the device is resetting or the reset has failed, firmware is
9198 	 * unable to handle the mailbox. Just record the vlan id and remove it
9199 	 * after the reset finishes.
9200 	 */
9201 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9202 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9203 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9204 		return -EBUSY;
9205 	}
9206 
9207 	/* When port based vlan is enabled, we use it as the vlan filter
9208 	 * entry. In this case, we don't update the vlan filter table when
9209 	 * the user adds a new vlan or removes an existing one; we just
9210 	 * update the vport vlan list. The vlan ids in that list are not
9211 	 * written to the vlan filter table until port based vlan is disabled.
9212 	 */
9213 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9214 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9215 					       vlan_id, is_kill);
9216 		writen_to_tbl = true;
9217 	}
9218 
9219 	if (!ret) {
9220 		if (!is_kill)
9221 			hclge_add_vport_vlan_table(vport, vlan_id,
9222 						   writen_to_tbl);
9223 		else if (is_kill && vlan_id != 0)
9224 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9225 	} else if (is_kill) {
9226 		/* When removing the hw vlan filter failed, record the vlan id
9227 		 * and try to remove it from hw later, to stay consistent
9228 		 * with the stack.
9229 		 */
9230 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9231 	}
9232 	return ret;
9233 }
9234 
9235 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9236 {
9237 #define HCLGE_MAX_SYNC_COUNT	60
9238 
9239 	int i, ret, sync_cnt = 0;
9240 	u16 vlan_id;
9241 
9242 	/* start from vport 1 since the PF is always alive */
9243 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9244 		struct hclge_vport *vport = &hdev->vport[i];
9245 
9246 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9247 					 VLAN_N_VID);
9248 		while (vlan_id != VLAN_N_VID) {
9249 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9250 						       vport->vport_id, vlan_id,
9251 						       true);
9252 			if (ret && ret != -EINVAL)
9253 				return;
9254 
9255 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9256 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9257 
9258 			sync_cnt++;
9259 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9260 				return;
9261 
9262 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9263 						 VLAN_N_VID);
9264 		}
9265 	}
9266 }
9267 
9268 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9269 {
9270 	struct hclge_config_max_frm_size_cmd *req;
9271 	struct hclge_desc desc;
9272 
9273 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9274 
9275 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9276 	req->max_frm_size = cpu_to_le16(new_mps);
9277 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9278 
9279 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9280 }
9281 
9282 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9283 {
9284 	struct hclge_vport *vport = hclge_get_vport(handle);
9285 
9286 	return hclge_set_vport_mtu(vport, new_mtu);
9287 }
9288 
9289 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9290 {
9291 	struct hclge_dev *hdev = vport->back;
9292 	int i, max_frm_size, ret;
9293 
9294 	/* HW supports 2 layers of vlan */
9295 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
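	/* e.g. for an MTU of 1500: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
	 * 8 (2 * VLAN_HLEN) = 1526 bytes.
	 */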
9296 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9297 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9298 		return -EINVAL;
9299 
9300 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9301 	mutex_lock(&hdev->vport_lock);
9302 	/* VF's mps must fit within hdev->mps */
9303 	if (vport->vport_id && max_frm_size > hdev->mps) {
9304 		mutex_unlock(&hdev->vport_lock);
9305 		return -EINVAL;
9306 	} else if (vport->vport_id) {
9307 		vport->mps = max_frm_size;
9308 		mutex_unlock(&hdev->vport_lock);
9309 		return 0;
9310 	}
9311 
9312 	/* PF's mps must not be less than any VF's mps */
9313 	for (i = 1; i < hdev->num_alloc_vport; i++)
9314 		if (max_frm_size < hdev->vport[i].mps) {
9315 			mutex_unlock(&hdev->vport_lock);
9316 			return -EINVAL;
9317 		}
9318 
9319 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9320 
9321 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9322 	if (ret) {
9323 		dev_err(&hdev->pdev->dev,
9324 			"Change mtu fail, ret =%d\n", ret);
9325 		goto out;
9326 	}
9327 
9328 	hdev->mps = max_frm_size;
9329 	vport->mps = max_frm_size;
9330 
9331 	ret = hclge_buffer_alloc(hdev);
9332 	if (ret)
9333 		dev_err(&hdev->pdev->dev,
9334 			"Allocate buffer fail, ret =%d\n", ret);
9335 
9336 out:
9337 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9338 	mutex_unlock(&hdev->vport_lock);
9339 	return ret;
9340 }
9341 
9342 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9343 				    bool enable)
9344 {
9345 	struct hclge_reset_tqp_queue_cmd *req;
9346 	struct hclge_desc desc;
9347 	int ret;
9348 
9349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9350 
9351 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9352 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9353 	if (enable)
9354 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9355 
9356 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9357 	if (ret) {
9358 		dev_err(&hdev->pdev->dev,
9359 			"Send tqp reset cmd error, status =%d\n", ret);
9360 		return ret;
9361 	}
9362 
9363 	return 0;
9364 }
9365 
9366 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9367 {
9368 	struct hclge_reset_tqp_queue_cmd *req;
9369 	struct hclge_desc desc;
9370 	int ret;
9371 
9372 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9373 
9374 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9375 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9376 
9377 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9378 	if (ret) {
9379 		dev_err(&hdev->pdev->dev,
9380 			"Get reset status error, status =%d\n", ret);
9381 		return ret;
9382 	}
9383 
9384 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9385 }
9386 
9387 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9388 {
9389 	struct hnae3_queue *queue;
9390 	struct hclge_tqp *tqp;
9391 
9392 	queue = handle->kinfo.tqp[queue_id];
9393 	tqp = container_of(queue, struct hclge_tqp, q);
9394 
9395 	return tqp->index;
9396 }
9397 
9398 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9399 {
9400 	struct hclge_vport *vport = hclge_get_vport(handle);
9401 	struct hclge_dev *hdev = vport->back;
9402 	int reset_try_times = 0;
9403 	int reset_status;
9404 	u16 queue_gid;
9405 	int ret;
9406 
9407 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9408 
9409 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9410 	if (ret) {
9411 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9412 		return ret;
9413 	}
9414 
9415 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9416 	if (ret) {
9417 		dev_err(&hdev->pdev->dev,
9418 			"Send reset tqp cmd fail, ret = %d\n", ret);
9419 		return ret;
9420 	}
9421 
9422 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9423 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9424 		if (reset_status)
9425 			break;
9426 
9427 		/* Wait for tqp hw reset */
9428 		usleep_range(1000, 1200);
9429 	}
9430 
9431 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9432 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9433 		return ret;
9434 	}
9435 
9436 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9437 	if (ret)
9438 		dev_err(&hdev->pdev->dev,
9439 			"Deassert the soft reset fail, ret = %d\n", ret);
9440 
9441 	return ret;
9442 }
9443 
9444 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9445 {
9446 	struct hnae3_handle *handle = &vport->nic;
9447 	struct hclge_dev *hdev = vport->back;
9448 	int reset_try_times = 0;
9449 	int reset_status;
9450 	u16 queue_gid;
9451 	int ret;
9452 
9453 	if (queue_id >= handle->kinfo.num_tqps) {
9454 		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9455 			 queue_id);
9456 		return;
9457 	}
9458 
9459 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9460 
9461 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9462 	if (ret) {
9463 		dev_warn(&hdev->pdev->dev,
9464 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9465 		return;
9466 	}
9467 
9468 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9469 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9470 		if (reset_status)
9471 			break;
9472 
9473 		/* Wait for tqp hw reset */
9474 		usleep_range(1000, 1200);
9475 	}
9476 
9477 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9478 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9479 		return;
9480 	}
9481 
9482 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9483 	if (ret)
9484 		dev_warn(&hdev->pdev->dev,
9485 			 "Deassert the soft reset fail, ret = %d\n", ret);
9486 }
9487 
9488 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9489 {
9490 	struct hclge_vport *vport = hclge_get_vport(handle);
9491 	struct hclge_dev *hdev = vport->back;
9492 
9493 	return hdev->fw_version;
9494 }
9495 
9496 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9497 {
9498 	struct phy_device *phydev = hdev->hw.mac.phydev;
9499 
9500 	if (!phydev)
9501 		return;
9502 
9503 	phy_set_asym_pause(phydev, rx_en, tx_en);
9504 }
9505 
9506 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9507 {
9508 	int ret;
9509 
9510 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9511 		return 0;
9512 
9513 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9514 	if (ret)
9515 		dev_err(&hdev->pdev->dev,
9516 			"configure pauseparam error, ret = %d.\n", ret);
9517 
9518 	return ret;
9519 }
9520 
9521 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9522 {
9523 	struct phy_device *phydev = hdev->hw.mac.phydev;
9524 	u16 remote_advertising = 0;
9525 	u16 local_advertising;
9526 	u32 rx_pause, tx_pause;
9527 	u8 flowctl;
9528 
9529 	if (!phydev->link || !phydev->autoneg)
9530 		return 0;
9531 
9532 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9533 
9534 	if (phydev->pause)
9535 		remote_advertising = LPA_PAUSE_CAP;
9536 
9537 	if (phydev->asym_pause)
9538 		remote_advertising |= LPA_PAUSE_ASYM;
9539 
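	/* Resolve the local and link-partner pause advertisements into the
	 * tx/rx pause configuration (standard IEEE 802.3 flow control
	 * autonegotiation resolution).
	 */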
9540 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9541 					   remote_advertising);
9542 	tx_pause = flowctl & FLOW_CTRL_TX;
9543 	rx_pause = flowctl & FLOW_CTRL_RX;
9544 
9545 	if (phydev->duplex == HCLGE_MAC_HALF) {
9546 		tx_pause = 0;
9547 		rx_pause = 0;
9548 	}
9549 
9550 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9551 }
9552 
9553 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9554 				 u32 *rx_en, u32 *tx_en)
9555 {
9556 	struct hclge_vport *vport = hclge_get_vport(handle);
9557 	struct hclge_dev *hdev = vport->back;
9558 	struct phy_device *phydev = hdev->hw.mac.phydev;
9559 
9560 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9561 
9562 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9563 		*rx_en = 0;
9564 		*tx_en = 0;
9565 		return;
9566 	}
9567 
9568 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9569 		*rx_en = 1;
9570 		*tx_en = 0;
9571 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9572 		*tx_en = 1;
9573 		*rx_en = 0;
9574 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9575 		*rx_en = 1;
9576 		*tx_en = 1;
9577 	} else {
9578 		*rx_en = 0;
9579 		*tx_en = 0;
9580 	}
9581 }
9582 
9583 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9584 					 u32 rx_en, u32 tx_en)
9585 {
9586 	if (rx_en && tx_en)
9587 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9588 	else if (rx_en && !tx_en)
9589 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9590 	else if (!rx_en && tx_en)
9591 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9592 	else
9593 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9594 
9595 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9596 }
9597 
9598 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9599 				u32 rx_en, u32 tx_en)
9600 {
9601 	struct hclge_vport *vport = hclge_get_vport(handle);
9602 	struct hclge_dev *hdev = vport->back;
9603 	struct phy_device *phydev = hdev->hw.mac.phydev;
9604 	u32 fc_autoneg;
9605 
9606 	if (phydev) {
9607 		fc_autoneg = hclge_get_autoneg(handle);
9608 		if (auto_neg != fc_autoneg) {
9609 			dev_info(&hdev->pdev->dev,
9610 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9611 			return -EOPNOTSUPP;
9612 		}
9613 	}
9614 
9615 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9616 		dev_info(&hdev->pdev->dev,
9617 			 "Priority flow control enabled. Cannot set link flow control.\n");
9618 		return -EOPNOTSUPP;
9619 	}
9620 
9621 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9622 
9623 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9624 
9625 	if (!auto_neg)
9626 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9627 
9628 	if (phydev)
9629 		return phy_start_aneg(phydev);
9630 
9631 	return -EOPNOTSUPP;
9632 }
9633 
9634 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9635 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9636 {
9637 	struct hclge_vport *vport = hclge_get_vport(handle);
9638 	struct hclge_dev *hdev = vport->back;
9639 
9640 	if (speed)
9641 		*speed = hdev->hw.mac.speed;
9642 	if (duplex)
9643 		*duplex = hdev->hw.mac.duplex;
9644 	if (auto_neg)
9645 		*auto_neg = hdev->hw.mac.autoneg;
9646 }
9647 
9648 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9649 				 u8 *module_type)
9650 {
9651 	struct hclge_vport *vport = hclge_get_vport(handle);
9652 	struct hclge_dev *hdev = vport->back;
9653 
9654 	/* When the nic is down, the service task is not running and does not
9655 	 * update the port information every second. Query the port information
9656 	 * before returning the media type to ensure it is correct.
9657 	 */
9658 	hclge_update_port_info(hdev);
9659 
9660 	if (media_type)
9661 		*media_type = hdev->hw.mac.media_type;
9662 
9663 	if (module_type)
9664 		*module_type = hdev->hw.mac.module_type;
9665 }
9666 
9667 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9668 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9669 {
9670 	struct hclge_vport *vport = hclge_get_vport(handle);
9671 	struct hclge_dev *hdev = vport->back;
9672 	struct phy_device *phydev = hdev->hw.mac.phydev;
9673 	int mdix_ctrl, mdix, is_resolved;
9674 	unsigned int retval;
9675 
9676 	if (!phydev) {
9677 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9678 		*tp_mdix = ETH_TP_MDI_INVALID;
9679 		return;
9680 	}
9681 
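	/* Switch the PHY to the MDI/MDI-X page, read the crossover control
	 * and status registers, then restore the copper page.
	 */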
9682 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9683 
9684 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9685 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9686 				    HCLGE_PHY_MDIX_CTRL_S);
9687 
9688 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9689 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9690 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9691 
9692 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9693 
9694 	switch (mdix_ctrl) {
9695 	case 0x0:
9696 		*tp_mdix_ctrl = ETH_TP_MDI;
9697 		break;
9698 	case 0x1:
9699 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9700 		break;
9701 	case 0x3:
9702 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9703 		break;
9704 	default:
9705 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9706 		break;
9707 	}
9708 
9709 	if (!is_resolved)
9710 		*tp_mdix = ETH_TP_MDI_INVALID;
9711 	else if (mdix)
9712 		*tp_mdix = ETH_TP_MDI_X;
9713 	else
9714 		*tp_mdix = ETH_TP_MDI;
9715 }
9716 
9717 static void hclge_info_show(struct hclge_dev *hdev)
9718 {
9719 	struct device *dev = &hdev->pdev->dev;
9720 
9721 	dev_info(dev, "PF info begin:\n");
9722 
9723 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9724 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9725 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9726 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9727 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9728 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9729 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9730 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9731 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9732 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9733 	dev_info(dev, "This is %s PF\n",
9734 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9735 	dev_info(dev, "DCB %s\n",
9736 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9737 	dev_info(dev, "MQPRIO %s\n",
9738 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9739 
9740 	dev_info(dev, "PF info end.\n");
9741 }
9742 
9743 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9744 					  struct hclge_vport *vport)
9745 {
9746 	struct hnae3_client *client = vport->nic.client;
9747 	struct hclge_dev *hdev = ae_dev->priv;
9748 	int rst_cnt = hdev->rst_stats.reset_cnt;
9749 	int ret;
9750 
9751 	ret = client->ops->init_instance(&vport->nic);
9752 	if (ret)
9753 		return ret;
9754 
9755 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9756 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9757 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9758 		ret = -EBUSY;
9759 		goto init_nic_err;
9760 	}
9761 
9762 	/* Enable nic hw error interrupts */
9763 	ret = hclge_config_nic_hw_error(hdev, true);
9764 	if (ret) {
9765 		dev_err(&ae_dev->pdev->dev,
9766 			"fail(%d) to enable hw error interrupts\n", ret);
9767 		goto init_nic_err;
9768 	}
9769 
9770 	hnae3_set_client_init_flag(client, ae_dev, 1);
9771 
9772 	if (netif_msg_drv(&hdev->vport->nic))
9773 		hclge_info_show(hdev);
9774 
9775 	return ret;
9776 
9777 init_nic_err:
9778 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9779 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9780 		msleep(HCLGE_WAIT_RESET_DONE);
9781 
9782 	client->ops->uninit_instance(&vport->nic, 0);
9783 
9784 	return ret;
9785 }
9786 
9787 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9788 					   struct hclge_vport *vport)
9789 {
9790 	struct hclge_dev *hdev = ae_dev->priv;
9791 	struct hnae3_client *client;
9792 	int rst_cnt;
9793 	int ret;
9794 
9795 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9796 	    !hdev->nic_client)
9797 		return 0;
9798 
9799 	client = hdev->roce_client;
9800 	ret = hclge_init_roce_base_info(vport);
9801 	if (ret)
9802 		return ret;
9803 
9804 	rst_cnt = hdev->rst_stats.reset_cnt;
9805 	ret = client->ops->init_instance(&vport->roce);
9806 	if (ret)
9807 		return ret;
9808 
9809 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9810 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9811 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9812 		ret = -EBUSY;
9813 		goto init_roce_err;
9814 	}
9815 
9816 	/* Enable roce ras interrupts */
9817 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9818 	if (ret) {
9819 		dev_err(&ae_dev->pdev->dev,
9820 			"fail(%d) to enable roce ras interrupts\n", ret);
9821 		goto init_roce_err;
9822 	}
9823 
9824 	hnae3_set_client_init_flag(client, ae_dev, 1);
9825 
9826 	return 0;
9827 
9828 init_roce_err:
9829 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9830 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9831 		msleep(HCLGE_WAIT_RESET_DONE);
9832 
9833 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9834 
9835 	return ret;
9836 }
9837 
9838 static int hclge_init_client_instance(struct hnae3_client *client,
9839 				      struct hnae3_ae_dev *ae_dev)
9840 {
9841 	struct hclge_dev *hdev = ae_dev->priv;
9842 	struct hclge_vport *vport;
9843 	int i, ret;
9844 
9845 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9846 		vport = &hdev->vport[i];
9847 
9848 		switch (client->type) {
9849 		case HNAE3_CLIENT_KNIC:
9850 			hdev->nic_client = client;
9851 			vport->nic.client = client;
9852 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9853 			if (ret)
9854 				goto clear_nic;
9855 
9856 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9857 			if (ret)
9858 				goto clear_roce;
9859 
9860 			break;
9861 		case HNAE3_CLIENT_ROCE:
9862 			if (hnae3_dev_roce_supported(hdev)) {
9863 				hdev->roce_client = client;
9864 				vport->roce.client = client;
9865 			}
9866 
9867 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9868 			if (ret)
9869 				goto clear_roce;
9870 
9871 			break;
9872 		default:
9873 			return -EINVAL;
9874 		}
9875 	}
9876 
9877 	return 0;
9878 
9879 clear_nic:
9880 	hdev->nic_client = NULL;
9881 	vport->nic.client = NULL;
9882 	return ret;
9883 clear_roce:
9884 	hdev->roce_client = NULL;
9885 	vport->roce.client = NULL;
9886 	return ret;
9887 }
9888 
9889 static void hclge_uninit_client_instance(struct hnae3_client *client,
9890 					 struct hnae3_ae_dev *ae_dev)
9891 {
9892 	struct hclge_dev *hdev = ae_dev->priv;
9893 	struct hclge_vport *vport;
9894 	int i;
9895 
9896 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9897 		vport = &hdev->vport[i];
9898 		if (hdev->roce_client) {
9899 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9900 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9901 				msleep(HCLGE_WAIT_RESET_DONE);
9902 
9903 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9904 								0);
9905 			hdev->roce_client = NULL;
9906 			vport->roce.client = NULL;
9907 		}
9908 		if (client->type == HNAE3_CLIENT_ROCE)
9909 			return;
9910 		if (hdev->nic_client && client->ops->uninit_instance) {
9911 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9912 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9913 				msleep(HCLGE_WAIT_RESET_DONE);
9914 
9915 			client->ops->uninit_instance(&vport->nic, 0);
9916 			hdev->nic_client = NULL;
9917 			vport->nic.client = NULL;
9918 		}
9919 	}
9920 }
9921 
9922 static int hclge_pci_init(struct hclge_dev *hdev)
9923 {
9924 	struct pci_dev *pdev = hdev->pdev;
9925 	struct hclge_hw *hw;
9926 	int ret;
9927 
9928 	ret = pci_enable_device(pdev);
9929 	if (ret) {
9930 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9931 		return ret;
9932 	}
9933 
9934 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9935 	if (ret) {
9936 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9937 		if (ret) {
9938 			dev_err(&pdev->dev,
9939 				"can't set consistent PCI DMA");
9940 			goto err_disable_device;
9941 		}
9942 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9943 	}
9944 
9945 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9946 	if (ret) {
9947 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9948 		goto err_disable_device;
9949 	}
9950 
9951 	pci_set_master(pdev);
9952 	hw = &hdev->hw;
9953 	hw->io_base = pcim_iomap(pdev, 2, 0);
9954 	if (!hw->io_base) {
9955 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9956 		ret = -ENOMEM;
9957 		goto err_clr_master;
9958 	}
9959 
9960 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9961 
9962 	return 0;
9963 err_clr_master:
9964 	pci_clear_master(pdev);
9965 	pci_release_regions(pdev);
9966 err_disable_device:
9967 	pci_disable_device(pdev);
9968 
9969 	return ret;
9970 }
9971 
9972 static void hclge_pci_uninit(struct hclge_dev *hdev)
9973 {
9974 	struct pci_dev *pdev = hdev->pdev;
9975 
9976 	pcim_iounmap(pdev, hdev->hw.io_base);
9977 	pci_free_irq_vectors(pdev);
9978 	pci_clear_master(pdev);
9979 	pci_release_mem_regions(pdev);
9980 	pci_disable_device(pdev);
9981 }
9982 
9983 static void hclge_state_init(struct hclge_dev *hdev)
9984 {
9985 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9986 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9987 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9988 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9989 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9990 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9991 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9992 }
9993 
9994 static void hclge_state_uninit(struct hclge_dev *hdev)
9995 {
9996 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9997 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9998 
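	/* only tear down the reset timer and service task if they have
	 * actually been set up
	 */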
9999 	if (hdev->reset_timer.function)
10000 		del_timer_sync(&hdev->reset_timer);
10001 	if (hdev->service_task.work.func)
10002 		cancel_delayed_work_sync(&hdev->service_task);
10003 }
10004 
10005 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10006 {
10007 #define HCLGE_FLR_RETRY_WAIT_MS	500
10008 #define HCLGE_FLR_RETRY_CNT	5
10009 
10010 	struct hclge_dev *hdev = ae_dev->priv;
10011 	int retry_cnt = 0;
10012 	int ret;
10013 
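	/* reset_sem is taken here and released either below when the prepare
	 * step is retried or in hclge_flr_done() once the FLR has completed
	 */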
10014 retry:
10015 	down(&hdev->reset_sem);
10016 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10017 	hdev->reset_type = HNAE3_FLR_RESET;
10018 	ret = hclge_reset_prepare(hdev);
10019 	if (ret || hdev->reset_pending) {
10020 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10021 			ret);
10022 		if (hdev->reset_pending ||
10023 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10024 			dev_err(&hdev->pdev->dev,
10025 				"reset_pending:0x%lx, retry_cnt:%d\n",
10026 				hdev->reset_pending, retry_cnt);
10027 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10028 			up(&hdev->reset_sem);
10029 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10030 			goto retry;
10031 		}
10032 	}
10033 
10034 	/* disable the misc vector before the FLR is done */
10035 	hclge_enable_vector(&hdev->misc_vector, false);
10036 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10037 	hdev->rst_stats.flr_rst_cnt++;
10038 }
10039 
10040 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10041 {
10042 	struct hclge_dev *hdev = ae_dev->priv;
10043 	int ret;
10044 
10045 	hclge_enable_vector(&hdev->misc_vector, true);
10046 
10047 	ret = hclge_reset_rebuild(hdev);
10048 	if (ret)
10049 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10050 
10051 	hdev->reset_type = HNAE3_NONE_RESET;
10052 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10053 	up(&hdev->reset_sem);
10054 }
10055 
10056 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10057 {
10058 	u16 i;
10059 
10060 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10061 		struct hclge_vport *vport = &hdev->vport[i];
10062 		int ret;
10063 
10064 		/* Send cmd to clear VF's FUNC_RST_ING */
10065 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10066 		if (ret)
10067 			dev_warn(&hdev->pdev->dev,
10068 				 "clear vf(%u) rst failed %d!\n",
10069 				 vport->vport_id, ret);
10070 	}
10071 }
10072 
10073 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
10074 {
10075 	struct hclge_desc desc;
10076 	int ret;
10077 
10078 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
10079 
10080 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10081 	/* This new command is only supported by new firmware; it will
10082 	 * fail with older firmware. The error value -EOPNOTSUPP can only be
10083 	 * returned by older firmware running this command, so to keep the
10084 	 * code backward compatible we override this value and return
10085 	 * success.
10086 	 */
10087 	if (ret && ret != -EOPNOTSUPP) {
10088 		dev_err(&hdev->pdev->dev,
10089 			"failed to clear hw resource, ret = %d\n", ret);
10090 		return ret;
10091 	}
10092 	return 0;
10093 }
10094 
10095 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10096 {
10097 	struct pci_dev *pdev = ae_dev->pdev;
10098 	struct hclge_dev *hdev;
10099 	int ret;
10100 
10101 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10102 	if (!hdev)
10103 		return -ENOMEM;
10104 
10105 	hdev->pdev = pdev;
10106 	hdev->ae_dev = ae_dev;
10107 	hdev->reset_type = HNAE3_NONE_RESET;
10108 	hdev->reset_level = HNAE3_FUNC_RESET;
10109 	ae_dev->priv = hdev;
10110 
10111 	/* HW supports 2-layer VLAN */
10112 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10113 
10114 	mutex_init(&hdev->vport_lock);
10115 	spin_lock_init(&hdev->fd_rule_lock);
10116 	sema_init(&hdev->reset_sem, 1);
10117 
10118 	ret = hclge_pci_init(hdev);
10119 	if (ret)
10120 		goto out;
10121 
10122 	/* Initialize the firmware command queue */
10123 	ret = hclge_cmd_queue_init(hdev);
10124 	if (ret)
10125 		goto err_pci_uninit;
10126 
10127 	/* Initialize firmware command handling */
10128 	ret = hclge_cmd_init(hdev);
10129 	if (ret)
10130 		goto err_cmd_uninit;
10131 
10132 	ret  = hclge_clear_hw_resource(hdev);
10133 	if (ret)
10134 		goto err_cmd_uninit;
10135 
10136 	ret = hclge_get_cap(hdev);
10137 	if (ret)
10138 		goto err_cmd_uninit;
10139 
10140 	ret = hclge_query_dev_specs(hdev);
10141 	if (ret) {
10142 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10143 			ret);
10144 		goto err_cmd_uninit;
10145 	}
10146 
10147 	ret = hclge_configure(hdev);
10148 	if (ret) {
10149 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10150 		goto err_cmd_uninit;
10151 	}
10152 
10153 	ret = hclge_init_msi(hdev);
10154 	if (ret) {
10155 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10156 		goto err_cmd_uninit;
10157 	}
10158 
10159 	ret = hclge_misc_irq_init(hdev);
10160 	if (ret)
10161 		goto err_msi_uninit;
10162 
10163 	ret = hclge_alloc_tqps(hdev);
10164 	if (ret) {
10165 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10166 		goto err_msi_irq_uninit;
10167 	}
10168 
10169 	ret = hclge_alloc_vport(hdev);
10170 	if (ret)
10171 		goto err_msi_irq_uninit;
10172 
10173 	ret = hclge_map_tqp(hdev);
10174 	if (ret)
10175 		goto err_msi_irq_uninit;
10176 
10177 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10178 		ret = hclge_mac_mdio_config(hdev);
10179 		if (ret)
10180 			goto err_msi_irq_uninit;
10181 	}
10182 
10183 	ret = hclge_init_umv_space(hdev);
10184 	if (ret)
10185 		goto err_mdiobus_unreg;
10186 
10187 	ret = hclge_mac_init(hdev);
10188 	if (ret) {
10189 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10190 		goto err_mdiobus_unreg;
10191 	}
10192 
10193 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10194 	if (ret) {
10195 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10196 		goto err_mdiobus_unreg;
10197 	}
10198 
10199 	ret = hclge_config_gro(hdev, true);
10200 	if (ret)
10201 		goto err_mdiobus_unreg;
10202 
10203 	ret = hclge_init_vlan_config(hdev);
10204 	if (ret) {
10205 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10206 		goto err_mdiobus_unreg;
10207 	}
10208 
10209 	ret = hclge_tm_schd_init(hdev);
10210 	if (ret) {
10211 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10212 		goto err_mdiobus_unreg;
10213 	}
10214 
10215 	hclge_rss_init_cfg(hdev);
10216 	ret = hclge_rss_init_hw(hdev);
10217 	if (ret) {
10218 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10219 		goto err_mdiobus_unreg;
10220 	}
10221 
10222 	ret = init_mgr_tbl(hdev);
10223 	if (ret) {
10224 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10225 		goto err_mdiobus_unreg;
10226 	}
10227 
10228 	ret = hclge_init_fd_config(hdev);
10229 	if (ret) {
10230 		dev_err(&pdev->dev,
10231 			"fd table init fail, ret=%d\n", ret);
10232 		goto err_mdiobus_unreg;
10233 	}
10234 
10235 	INIT_KFIFO(hdev->mac_tnl_log);
10236 
10237 	hclge_dcb_ops_set(hdev);
10238 
10239 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10240 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10241 
10242 	/* Set up affinity after the service timer is set up, because
10243 	 * add_timer_on() is called in affinity notify.
10244 	 */
10245 	hclge_misc_affinity_setup(hdev);
10246 
10247 	hclge_clear_all_event_cause(hdev);
10248 	hclge_clear_resetting_state(hdev);
10249 
10250 	/* Log and clear the hw errors that have already occurred */
10251 	hclge_handle_all_hns_hw_errors(ae_dev);
10252 
10253 	/* request a delayed reset for error recovery, because an immediate
10254 	 * global reset on a PF would affect pending initialization of other PFs
10255 	 */
10256 	if (ae_dev->hw_err_reset_req) {
10257 		enum hnae3_reset_type reset_level;
10258 
10259 		reset_level = hclge_get_reset_level(ae_dev,
10260 						    &ae_dev->hw_err_reset_req);
10261 		hclge_set_def_reset_request(ae_dev, reset_level);
10262 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10263 	}
10264 
10265 	/* Enable MISC vector(vector0) */
10266 	hclge_enable_vector(&hdev->misc_vector, true);
10267 
10268 	hclge_state_init(hdev);
10269 	hdev->last_reset_time = jiffies;
10270 
10271 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10272 		 HCLGE_DRIVER_NAME);
10273 
10274 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10275 
10276 	return 0;
10277 
10278 err_mdiobus_unreg:
10279 	if (hdev->hw.mac.phydev)
10280 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10281 err_msi_irq_uninit:
10282 	hclge_misc_irq_uninit(hdev);
10283 err_msi_uninit:
10284 	pci_free_irq_vectors(pdev);
10285 err_cmd_uninit:
10286 	hclge_cmd_uninit(hdev);
10287 err_pci_uninit:
10288 	pcim_iounmap(pdev, hdev->hw.io_base);
10289 	pci_clear_master(pdev);
10290 	pci_release_regions(pdev);
10291 	pci_disable_device(pdev);
10292 out:
10293 	mutex_destroy(&hdev->vport_lock);
10294 	return ret;
10295 }
10296 
10297 static void hclge_stats_clear(struct hclge_dev *hdev)
10298 {
10299 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10300 }
10301 
10302 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10303 {
10304 	return hclge_config_switch_param(hdev, vf, enable,
10305 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10306 }
10307 
10308 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10309 {
10310 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10311 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10312 					  enable, vf);
10313 }
10314 
10315 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10316 {
10317 	int ret;
10318 
10319 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10320 	if (ret) {
10321 		dev_err(&hdev->pdev->dev,
10322 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10323 			vf, enable ? "on" : "off", ret);
10324 		return ret;
10325 	}
10326 
10327 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10328 	if (ret)
10329 		dev_err(&hdev->pdev->dev,
10330 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10331 			vf, enable ? "on" : "off", ret);
10332 
10333 	return ret;
10334 }
10335 
10336 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10337 				 bool enable)
10338 {
10339 	struct hclge_vport *vport = hclge_get_vport(handle);
10340 	struct hclge_dev *hdev = vport->back;
10341 	u32 new_spoofchk = enable ? 1 : 0;
10342 	int ret;
10343 
10344 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10345 		return -EOPNOTSUPP;
10346 
10347 	vport = hclge_get_vf_vport(hdev, vf);
10348 	if (!vport)
10349 		return -EINVAL;
10350 
10351 	if (vport->vf_info.spoofchk == new_spoofchk)
10352 		return 0;
10353 
10354 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10355 		dev_warn(&hdev->pdev->dev,
10356 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10357 			 vf);
10358 	else if (enable && hclge_is_umv_space_full(vport, true))
10359 		dev_warn(&hdev->pdev->dev,
10360 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10361 			 vf);
10362 
10363 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10364 	if (ret)
10365 		return ret;
10366 
10367 	vport->vf_info.spoofchk = new_spoofchk;
10368 	return 0;
10369 }
10370 
10371 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10372 {
10373 	struct hclge_vport *vport = hdev->vport;
10374 	int ret;
10375 	int i;
10376 
10377 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10378 		return 0;
10379 
10380 	/* resume the vf spoof check state after reset */
10381 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10382 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10383 					       vport->vf_info.spoofchk);
10384 		if (ret)
10385 			return ret;
10386 
10387 		vport++;
10388 	}
10389 
10390 	return 0;
10391 }
10392 
10393 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10394 {
10395 	struct hclge_vport *vport = hclge_get_vport(handle);
10396 	struct hclge_dev *hdev = vport->back;
10397 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10398 	u32 new_trusted = enable ? 1 : 0;
10399 	bool en_bc_pmc;
10400 	int ret;
10401 
10402 	vport = hclge_get_vf_vport(hdev, vf);
10403 	if (!vport)
10404 		return -EINVAL;
10405 
10406 	if (vport->vf_info.trusted == new_trusted)
10407 		return 0;
10408 
10409 	/* Disable promisc mode for VF if it is not trusted any more. */
10410 	if (!enable && vport->vf_info.promisc_enable) {
10411 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10412 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10413 						   en_bc_pmc);
10414 		if (ret)
10415 			return ret;
10416 		vport->vf_info.promisc_enable = 0;
10417 		hclge_inform_vf_promisc_info(vport);
10418 	}
10419 
10420 	vport->vf_info.trusted = new_trusted;
10421 
10422 	return 0;
10423 }
10424 
10425 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10426 {
10427 	int ret;
10428 	int vf;
10429 
10430 	/* reset vf rate to default value */
10431 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10432 		struct hclge_vport *vport = &hdev->vport[vf];
10433 
10434 		vport->vf_info.max_tx_rate = 0;
10435 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10436 		if (ret)
10437 			dev_err(&hdev->pdev->dev,
10438 				"vf%d failed to reset to default, ret=%d\n",
10439 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10440 	}
10441 }
10442 
10443 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10444 				     int min_tx_rate, int max_tx_rate)
10445 {
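	/* only min_tx_rate == 0 is accepted, and max_tx_rate must lie
	 * within [0, max_speed of the MAC]
	 */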
10446 	if (min_tx_rate != 0 ||
10447 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10448 		dev_err(&hdev->pdev->dev,
10449 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10450 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10451 		return -EINVAL;
10452 	}
10453 
10454 	return 0;
10455 }
10456 
10457 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10458 			     int min_tx_rate, int max_tx_rate, bool force)
10459 {
10460 	struct hclge_vport *vport = hclge_get_vport(handle);
10461 	struct hclge_dev *hdev = vport->back;
10462 	int ret;
10463 
10464 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10465 	if (ret)
10466 		return ret;
10467 
10468 	vport = hclge_get_vf_vport(hdev, vf);
10469 	if (!vport)
10470 		return -EINVAL;
10471 
10472 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10473 		return 0;
10474 
10475 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10476 	if (ret)
10477 		return ret;
10478 
10479 	vport->vf_info.max_tx_rate = max_tx_rate;
10480 
10481 	return 0;
10482 }
10483 
10484 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10485 {
10486 	struct hnae3_handle *handle = &hdev->vport->nic;
10487 	struct hclge_vport *vport;
10488 	int ret;
10489 	int vf;
10490 
10491 	/* resume the vf max_tx_rate after reset */
10492 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10493 		vport = hclge_get_vf_vport(hdev, vf);
10494 		if (!vport)
10495 			return -EINVAL;
10496 
10497 		/* zero means max rate; after reset, the firmware has already
10498 		 * set it to max rate, so just continue.
10499 		 */
10500 		if (!vport->vf_info.max_tx_rate)
10501 			continue;
10502 
10503 		ret = hclge_set_vf_rate(handle, vf, 0,
10504 					vport->vf_info.max_tx_rate, true);
10505 		if (ret) {
10506 			dev_err(&hdev->pdev->dev,
10507 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10508 				vf, vport->vf_info.max_tx_rate, ret);
10509 			return ret;
10510 		}
10511 	}
10512 
10513 	return 0;
10514 }
10515 
10516 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10517 {
10518 	struct hclge_vport *vport = hdev->vport;
10519 	int i;
10520 
10521 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10522 		hclge_vport_stop(vport);
10523 		vport++;
10524 	}
10525 }
10526 
10527 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10528 {
10529 	struct hclge_dev *hdev = ae_dev->priv;
10530 	struct pci_dev *pdev = ae_dev->pdev;
10531 	int ret;
10532 
10533 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10534 
10535 	hclge_stats_clear(hdev);
10536 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
10537 	 * table entries, so the tables in memory should not be cleaned here.
10538 	 */
10539 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10540 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10541 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10542 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10543 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10544 		hclge_reset_umv_space(hdev);
10545 	}
10546 
10547 	ret = hclge_cmd_init(hdev);
10548 	if (ret) {
10549 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10550 		return ret;
10551 	}
10552 
10553 	ret = hclge_map_tqp(hdev);
10554 	if (ret) {
10555 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10556 		return ret;
10557 	}
10558 
10559 	ret = hclge_mac_init(hdev);
10560 	if (ret) {
10561 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10562 		return ret;
10563 	}
10564 
10565 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10566 	if (ret) {
10567 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10568 		return ret;
10569 	}
10570 
10571 	ret = hclge_config_gro(hdev, true);
10572 	if (ret)
10573 		return ret;
10574 
10575 	ret = hclge_init_vlan_config(hdev);
10576 	if (ret) {
10577 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10578 		return ret;
10579 	}
10580 
10581 	ret = hclge_tm_init_hw(hdev, true);
10582 	if (ret) {
10583 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10584 		return ret;
10585 	}
10586 
10587 	ret = hclge_rss_init_hw(hdev);
10588 	if (ret) {
10589 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10590 		return ret;
10591 	}
10592 
10593 	ret = init_mgr_tbl(hdev);
10594 	if (ret) {
10595 		dev_err(&pdev->dev,
10596 			"failed to reinit manager table, ret = %d\n", ret);
10597 		return ret;
10598 	}
10599 
10600 	ret = hclge_init_fd_config(hdev);
10601 	if (ret) {
10602 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10603 		return ret;
10604 	}
10605 
10606 	/* Log and clear the hw errors that have already occurred */
10607 	hclge_handle_all_hns_hw_errors(ae_dev);
10608 
10609 	/* Re-enable the hw error interrupts because
10610 	 * the interrupts get disabled on global reset.
10611 	 */
10612 	ret = hclge_config_nic_hw_error(hdev, true);
10613 	if (ret) {
10614 		dev_err(&pdev->dev,
10615 			"fail(%d) to re-enable NIC hw error interrupts\n",
10616 			ret);
10617 		return ret;
10618 	}
10619 
10620 	if (hdev->roce_client) {
10621 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10622 		if (ret) {
10623 			dev_err(&pdev->dev,
10624 				"fail(%d) to re-enable roce ras interrupts\n",
10625 				ret);
10626 			return ret;
10627 		}
10628 	}
10629 
10630 	hclge_reset_vport_state(hdev);
10631 	ret = hclge_reset_vport_spoofchk(hdev);
10632 	if (ret)
10633 		return ret;
10634 
10635 	ret = hclge_resume_vf_rate(hdev);
10636 	if (ret)
10637 		return ret;
10638 
10639 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10640 		 HCLGE_DRIVER_NAME);
10641 
10642 	return 0;
10643 }
10644 
10645 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10646 {
10647 	struct hclge_dev *hdev = ae_dev->priv;
10648 	struct hclge_mac *mac = &hdev->hw.mac;
10649 
10650 	hclge_reset_vf_rate(hdev);
10651 	hclge_clear_vf_vlan(hdev);
10652 	hclge_misc_affinity_teardown(hdev);
10653 	hclge_state_uninit(hdev);
10654 	hclge_uninit_mac_table(hdev);
10655 
10656 	if (mac->phydev)
10657 		mdiobus_unregister(mac->mdio_bus);
10658 
10659 	/* Disable MISC vector(vector0) */
10660 	hclge_enable_vector(&hdev->misc_vector, false);
10661 	synchronize_irq(hdev->misc_vector.vector_irq);
10662 
10663 	/* Disable all hw interrupts */
10664 	hclge_config_mac_tnl_int(hdev, false);
10665 	hclge_config_nic_hw_error(hdev, false);
10666 	hclge_config_rocee_ras_interrupt(hdev, false);
10667 
10668 	hclge_cmd_uninit(hdev);
10669 	hclge_misc_irq_uninit(hdev);
10670 	hclge_pci_uninit(hdev);
10671 	mutex_destroy(&hdev->vport_lock);
10672 	hclge_uninit_vport_vlan_table(hdev);
10673 	ae_dev->priv = NULL;
10674 }
10675 
10676 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10677 {
10678 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10679 	struct hclge_vport *vport = hclge_get_vport(handle);
10680 	struct hclge_dev *hdev = vport->back;
10681 
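	/* the channel count is limited both by the RSS size cap and by the
	 * TQPs available per TC on this vport
	 */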
10682 	return min_t(u32, hdev->rss_size_max,
10683 		     vport->alloc_tqps / kinfo->num_tc);
10684 }
10685 
10686 static void hclge_get_channels(struct hnae3_handle *handle,
10687 			       struct ethtool_channels *ch)
10688 {
10689 	ch->max_combined = hclge_get_max_channels(handle);
10690 	ch->other_count = 1;
10691 	ch->max_other = 1;
10692 	ch->combined_count = handle->kinfo.rss_size;
10693 }
10694 
10695 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10696 					u16 *alloc_tqps, u16 *max_rss_size)
10697 {
10698 	struct hclge_vport *vport = hclge_get_vport(handle);
10699 	struct hclge_dev *hdev = vport->back;
10700 
10701 	*alloc_tqps = vport->alloc_tqps;
10702 	*max_rss_size = hdev->rss_size_max;
10703 }
10704 
10705 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10706 			      bool rxfh_configured)
10707 {
10708 	struct hclge_vport *vport = hclge_get_vport(handle);
10709 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10710 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10711 	struct hclge_dev *hdev = vport->back;
10712 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10713 	u16 cur_rss_size = kinfo->rss_size;
10714 	u16 cur_tqps = kinfo->num_tqps;
10715 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10716 	u16 roundup_size;
10717 	u32 *rss_indir;
10718 	unsigned int i;
10719 	int ret;
10720 
10721 	kinfo->req_rss_size = new_tqps_num;
10722 
10723 	ret = hclge_tm_vport_map_update(hdev);
10724 	if (ret) {
10725 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10726 		return ret;
10727 	}
10728 
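	/* the tc_size handed to hclge_set_rss_tc_mode() is ilog2() of the
	 * per-TC rss_size rounded up to a power of two
	 */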
10729 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10730 	roundup_size = ilog2(roundup_size);
10731 	/* Set the RSS TC mode according to the new RSS size */
10732 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10733 		tc_valid[i] = 0;
10734 
10735 		if (!(hdev->hw_tc_map & BIT(i)))
10736 			continue;
10737 
10738 		tc_valid[i] = 1;
10739 		tc_size[i] = roundup_size;
10740 		tc_offset[i] = kinfo->rss_size * i;
10741 	}
10742 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10743 	if (ret)
10744 		return ret;
10745 
10746 	/* RSS indirection table has been configured by the user */
10747 	if (rxfh_configured)
10748 		goto out;
10749 
10750 	/* Reinitialize the RSS indirection table according to the new RSS size */
10751 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10752 	if (!rss_indir)
10753 		return -ENOMEM;
10754 
10755 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10756 		rss_indir[i] = i % kinfo->rss_size;
10757 
10758 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10759 	if (ret)
10760 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10761 			ret);
10762 
10763 	kfree(rss_indir);
10764 
10765 out:
10766 	if (!ret)
10767 		dev_info(&hdev->pdev->dev,
10768 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10769 			 cur_rss_size, kinfo->rss_size,
10770 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10771 
10772 	return ret;
10773 }
10774 
10775 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10776 			      u32 *regs_num_64_bit)
10777 {
10778 	struct hclge_desc desc;
10779 	u32 total_num;
10780 	int ret;
10781 
10782 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10784 	if (ret) {
10785 		dev_err(&hdev->pdev->dev,
10786 			"Query register number cmd failed, ret = %d.\n", ret);
10787 		return ret;
10788 	}
10789 
10790 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10791 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10792 
10793 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10794 	if (!total_num)
10795 		return -EINVAL;
10796 
10797 	return 0;
10798 }
10799 
10800 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10801 				 void *data)
10802 {
10803 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10804 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10805 
10806 	struct hclge_desc *desc;
10807 	u32 *reg_val = data;
10808 	__le32 *desc_data;
10809 	int nodata_num;
10810 	int cmd_num;
10811 	int i, k, n;
10812 	int ret;
10813 
10814 	if (regs_num == 0)
10815 		return 0;
10816 
10817 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10818 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10819 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10820 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10821 	if (!desc)
10822 		return -ENOMEM;
10823 
10824 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10825 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10826 	if (ret) {
10827 		dev_err(&hdev->pdev->dev,
10828 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10829 		kfree(desc);
10830 		return ret;
10831 	}
10832 
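	/* the first BD reserves nodata_num words for the command header,
	 * while in the continuation BDs the whole descriptor carries
	 * register data
	 */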
10833 	for (i = 0; i < cmd_num; i++) {
10834 		if (i == 0) {
10835 			desc_data = (__le32 *)(&desc[i].data[0]);
10836 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10837 		} else {
10838 			desc_data = (__le32 *)(&desc[i]);
10839 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10840 		}
10841 		for (k = 0; k < n; k++) {
10842 			*reg_val++ = le32_to_cpu(*desc_data++);
10843 
10844 			regs_num--;
10845 			if (!regs_num)
10846 				break;
10847 		}
10848 	}
10849 
10850 	kfree(desc);
10851 	return 0;
10852 }
10853 
10854 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10855 				 void *data)
10856 {
10857 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10858 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10859 
10860 	struct hclge_desc *desc;
10861 	u64 *reg_val = data;
10862 	__le64 *desc_data;
10863 	int nodata_len;
10864 	int cmd_num;
10865 	int i, k, n;
10866 	int ret;
10867 
10868 	if (regs_num == 0)
10869 		return 0;
10870 
10871 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10872 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10873 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10874 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10875 	if (!desc)
10876 		return -ENOMEM;
10877 
10878 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10879 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10880 	if (ret) {
10881 		dev_err(&hdev->pdev->dev,
10882 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10883 		kfree(desc);
10884 		return ret;
10885 	}
10886 
10887 	for (i = 0; i < cmd_num; i++) {
10888 		if (i == 0) {
10889 			desc_data = (__le64 *)(&desc[i].data[0]);
10890 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10891 		} else {
10892 			desc_data = (__le64 *)(&desc[i]);
10893 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10894 		}
10895 		for (k = 0; k < n; k++) {
10896 			*reg_val++ = le64_to_cpu(*desc_data++);
10897 
10898 			regs_num--;
10899 			if (!regs_num)
10900 				break;
10901 		}
10902 	}
10903 
10904 	kfree(desc);
10905 	return 0;
10906 }
10907 
10908 #define MAX_SEPARATE_NUM	4
10909 #define SEPARATOR_VALUE		0xFDFCFBFA
10910 #define REG_NUM_PER_LINE	4
10911 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10912 #define REG_SEPARATOR_LINE	1
10913 #define REG_NUM_REMAIN_MASK	3
10914 
10915 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10916 {
10917 	int i;
10918 
10919 	/* initialize all command BDs except the last one */
10920 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10921 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10922 					   true);
10923 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10924 	}
10925 
10926 	/* initialize the last command BD */
10927 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10928 
10929 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10930 }
10931 
10932 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10933 				    int *bd_num_list,
10934 				    u32 type_num)
10935 {
10936 	u32 entries_per_desc, desc_index, index, offset, i;
10937 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10938 	int ret;
10939 
10940 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10941 	if (ret) {
10942 		dev_err(&hdev->pdev->dev,
10943 			"Get dfx bd num fail, status is %d.\n", ret);
10944 		return ret;
10945 	}
10946 
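	/* each DFX register type's BD count sits at a fixed offset in the
	 * returned descriptor data, given by hclge_dfx_bd_offset_list[]
	 */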
10947 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10948 	for (i = 0; i < type_num; i++) {
10949 		offset = hclge_dfx_bd_offset_list[i];
10950 		index = offset % entries_per_desc;
10951 		desc_index = offset / entries_per_desc;
10952 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10953 	}
10954 
10955 	return ret;
10956 }
10957 
10958 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10959 				  struct hclge_desc *desc_src, int bd_num,
10960 				  enum hclge_opcode_type cmd)
10961 {
10962 	struct hclge_desc *desc = desc_src;
10963 	int i, ret;
10964 
10965 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10966 	for (i = 0; i < bd_num - 1; i++) {
10967 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10968 		desc++;
10969 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10970 	}
10971 
10972 	desc = desc_src;
10973 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10974 	if (ret)
10975 		dev_err(&hdev->pdev->dev,
10976 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10977 			cmd, ret);
10978 
10979 	return ret;
10980 }
10981 
10982 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10983 				    void *data)
10984 {
10985 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10986 	struct hclge_desc *desc = desc_src;
10987 	u32 *reg = data;
10988 
10989 	entries_per_desc = ARRAY_SIZE(desc->data);
10990 	reg_num = entries_per_desc * bd_num;
10991 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10992 	for (i = 0; i < reg_num; i++) {
10993 		index = i % entries_per_desc;
10994 		desc_index = i / entries_per_desc;
10995 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10996 	}
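	/* pad up to the next REG_NUM_PER_LINE boundary with at least one
	 * separator value so each register block ends on a full line
	 */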
10997 	for (i = 0; i < separator_num; i++)
10998 		*reg++ = SEPARATOR_VALUE;
10999 
11000 	return reg_num + separator_num;
11001 }
11002 
11003 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11004 {
11005 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11006 	int data_len_per_desc, bd_num, i;
11007 	int *bd_num_list;
11008 	u32 data_len;
11009 	int ret;
11010 
11011 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11012 	if (!bd_num_list)
11013 		return -ENOMEM;
11014 
11015 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11016 	if (ret) {
11017 		dev_err(&hdev->pdev->dev,
11018 			"Get dfx reg bd num fail, status is %d.\n", ret);
11019 		goto out;
11020 	}
11021 
11022 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11023 	*len = 0;
11024 	for (i = 0; i < dfx_reg_type_num; i++) {
11025 		bd_num = bd_num_list[i];
11026 		data_len = data_len_per_desc * bd_num;
11027 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11028 	}
11029 
11030 out:
11031 	kfree(bd_num_list);
11032 	return ret;
11033 }
11034 
11035 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11036 {
11037 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11038 	int bd_num, bd_num_max, buf_len, i;
11039 	struct hclge_desc *desc_src;
11040 	int *bd_num_list;
11041 	u32 *reg = data;
11042 	int ret;
11043 
11044 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
11045 	if (!bd_num_list)
11046 		return -ENOMEM;
11047 
11048 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11049 	if (ret) {
11050 		dev_err(&hdev->pdev->dev,
11051 			"Get dfx reg bd num fail, status is %d.\n", ret);
11052 		goto out;
11053 	}
11054 
11055 	bd_num_max = bd_num_list[0];
11056 	for (i = 1; i < dfx_reg_type_num; i++)
11057 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11058 
11059 	buf_len = sizeof(*desc_src) * bd_num_max;
11060 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11061 	if (!desc_src) {
11062 		ret = -ENOMEM;
11063 		goto out;
11064 	}
11065 
11066 	for (i = 0; i < dfx_reg_type_num; i++) {
11067 		bd_num = bd_num_list[i];
11068 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11069 					     hclge_dfx_reg_opcode_list[i]);
11070 		if (ret) {
11071 			dev_err(&hdev->pdev->dev,
11072 				"Get dfx reg fail, status is %d.\n", ret);
11073 			break;
11074 		}
11075 
11076 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11077 	}
11078 
11079 	kfree(desc_src);
11080 out:
11081 	kfree(bd_num_list);
11082 	return ret;
11083 }
11084 
11085 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11086 			      struct hnae3_knic_private_info *kinfo)
11087 {
11088 #define HCLGE_RING_REG_OFFSET		0x200
11089 #define HCLGE_RING_INT_REG_OFFSET	0x4
11090 
11091 	int i, j, reg_num, separator_num;
11092 	int data_num_sum;
11093 	u32 *reg = data;
11094 
11095 	/* fetch per-PF register values from the PF PCIe register space */
11096 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11097 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11098 	for (i = 0; i < reg_num; i++)
11099 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11100 	for (i = 0; i < separator_num; i++)
11101 		*reg++ = SEPARATOR_VALUE;
11102 	data_num_sum = reg_num + separator_num;
11103 
11104 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11105 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11106 	for (i = 0; i < reg_num; i++)
11107 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11108 	for (i = 0; i < separator_num; i++)
11109 		*reg++ = SEPARATOR_VALUE;
11110 	data_num_sum += reg_num + separator_num;
11111 
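	/* ring registers repeat for every TQP at a fixed
	 * HCLGE_RING_REG_OFFSET stride
	 */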
11112 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11113 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11114 	for (j = 0; j < kinfo->num_tqps; j++) {
11115 		for (i = 0; i < reg_num; i++)
11116 			*reg++ = hclge_read_dev(&hdev->hw,
11117 						ring_reg_addr_list[i] +
11118 						HCLGE_RING_REG_OFFSET * j);
11119 		for (i = 0; i < separator_num; i++)
11120 			*reg++ = SEPARATOR_VALUE;
11121 	}
11122 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11123 
11124 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11125 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11126 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11127 		for (i = 0; i < reg_num; i++)
11128 			*reg++ = hclge_read_dev(&hdev->hw,
11129 						tqp_intr_reg_addr_list[i] +
11130 						HCLGE_RING_INT_REG_OFFSET * j);
11131 		for (i = 0; i < separator_num; i++)
11132 			*reg++ = SEPARATOR_VALUE;
11133 	}
11134 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11135 
11136 	return data_num_sum;
11137 }
11138 
11139 static int hclge_get_regs_len(struct hnae3_handle *handle)
11140 {
11141 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11142 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11143 	struct hclge_vport *vport = hclge_get_vport(handle);
11144 	struct hclge_dev *hdev = vport->back;
11145 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11146 	int regs_lines_32_bit, regs_lines_64_bit;
11147 	int ret;
11148 
11149 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11150 	if (ret) {
11151 		dev_err(&hdev->pdev->dev,
11152 			"Get register number failed, ret = %d.\n", ret);
11153 		return ret;
11154 	}
11155 
11156 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11157 	if (ret) {
11158 		dev_err(&hdev->pdev->dev,
11159 			"Get dfx reg len failed, ret = %d.\n", ret);
11160 		return ret;
11161 	}
11162 
11163 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11164 		REG_SEPARATOR_LINE;
11165 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11166 		REG_SEPARATOR_LINE;
11167 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11168 		REG_SEPARATOR_LINE;
11169 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11170 		REG_SEPARATOR_LINE;
11171 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11172 		REG_SEPARATOR_LINE;
11173 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11174 		REG_SEPARATOR_LINE;
11175 
11176 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11177 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11178 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11179 }
11180 
11181 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11182 			   void *data)
11183 {
11184 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11185 	struct hclge_vport *vport = hclge_get_vport(handle);
11186 	struct hclge_dev *hdev = vport->back;
11187 	u32 regs_num_32_bit, regs_num_64_bit;
11188 	int i, reg_num, separator_num, ret;
11189 	u32 *reg = data;
11190 
11191 	*version = hdev->fw_version;
11192 
11193 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11194 	if (ret) {
11195 		dev_err(&hdev->pdev->dev,
11196 			"Get register number failed, ret = %d.\n", ret);
11197 		return;
11198 	}
11199 
11200 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11201 
11202 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11203 	if (ret) {
11204 		dev_err(&hdev->pdev->dev,
11205 			"Get 32 bit register failed, ret = %d.\n", ret);
11206 		return;
11207 	}
11208 	reg_num = regs_num_32_bit;
11209 	reg += reg_num;
11210 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11211 	for (i = 0; i < separator_num; i++)
11212 		*reg++ = SEPARATOR_VALUE;
11213 
11214 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11215 	if (ret) {
11216 		dev_err(&hdev->pdev->dev,
11217 			"Get 64 bit register failed, ret = %d.\n", ret);
11218 		return;
11219 	}
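	/* each 64-bit register occupies two u32 entries in the output buffer */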
11220 	reg_num = regs_num_64_bit * 2;
11221 	reg += reg_num;
11222 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11223 	for (i = 0; i < separator_num; i++)
11224 		*reg++ = SEPARATOR_VALUE;
11225 
11226 	ret = hclge_get_dfx_reg(hdev, reg);
11227 	if (ret)
11228 		dev_err(&hdev->pdev->dev,
11229 			"Get dfx register failed, ret = %d.\n", ret);
11230 }
11231 
11232 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11233 {
11234 	struct hclge_set_led_state_cmd *req;
11235 	struct hclge_desc desc;
11236 	int ret;
11237 
11238 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11239 
11240 	req = (struct hclge_set_led_state_cmd *)desc.data;
11241 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11242 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11243 
11244 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11245 	if (ret)
11246 		dev_err(&hdev->pdev->dev,
11247 			"Send set led state cmd error, ret =%d\n", ret);
11248 
11249 	return ret;
11250 }
11251 
11252 enum hclge_led_status {
11253 	HCLGE_LED_OFF,
11254 	HCLGE_LED_ON,
11255 	HCLGE_LED_NO_CHANGE = 0xFF,
11256 };
11257 
11258 static int hclge_set_led_id(struct hnae3_handle *handle,
11259 			    enum ethtool_phys_id_state status)
11260 {
11261 	struct hclge_vport *vport = hclge_get_vport(handle);
11262 	struct hclge_dev *hdev = vport->back;
11263 
11264 	switch (status) {
11265 	case ETHTOOL_ID_ACTIVE:
11266 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11267 	case ETHTOOL_ID_INACTIVE:
11268 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11269 	default:
11270 		return -EINVAL;
11271 	}
11272 }
11273 
11274 static void hclge_get_link_mode(struct hnae3_handle *handle,
11275 				unsigned long *supported,
11276 				unsigned long *advertising)
11277 {
11278 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11279 	struct hclge_vport *vport = hclge_get_vport(handle);
11280 	struct hclge_dev *hdev = vport->back;
11281 	unsigned int idx = 0;
11282 
11283 	for (; idx < size; idx++) {
11284 		supported[idx] = hdev->hw.mac.supported[idx];
11285 		advertising[idx] = hdev->hw.mac.advertising[idx];
11286 	}
11287 }
11288 
11289 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11290 {
11291 	struct hclge_vport *vport = hclge_get_vport(handle);
11292 	struct hclge_dev *hdev = vport->back;
11293 
11294 	return hclge_config_gro(hdev, enable);
11295 }
11296 
11297 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11298 {
11299 	struct hclge_vport *vport = &hdev->vport[0];
11300 	struct hnae3_handle *handle = &vport->nic;
11301 	u8 tmp_flags;
11302 	int ret;
11303 
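	/* re-apply the promisc configuration when the overflow promisc flags
	 * have changed or an earlier update is still pending
	 */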
11304 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11305 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11306 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11307 	}
11308 
11309 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11310 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11311 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11312 					     tmp_flags & HNAE3_MPE);
11313 		if (!ret) {
11314 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11315 			hclge_enable_vlan_filter(handle,
11316 						 tmp_flags & HNAE3_VLAN_FLTR);
11317 		}
11318 	}
11319 }
11320 
11321 static bool hclge_module_existed(struct hclge_dev *hdev)
11322 {
11323 	struct hclge_desc desc;
11324 	u32 existed;
11325 	int ret;
11326 
11327 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11328 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11329 	if (ret) {
11330 		dev_err(&hdev->pdev->dev,
11331 			"failed to get SFP exist state, ret = %d\n", ret);
11332 		return false;
11333 	}
11334 
11335 	existed = le32_to_cpu(desc.data[0]);
11336 
11337 	return existed != 0;
11338 }
11339 
11340 /* need 6 BDs (140 bytes in total) for one read;
11341  * return the number of bytes actually read, 0 means the read failed.
11342  */
11343 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11344 				     u32 len, u8 *data)
11345 {
11346 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11347 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11348 	u16 read_len;
11349 	u16 copy_len;
11350 	int ret;
11351 	int i;
11352 
11353 	/* setup all 6 bds to read module eeprom info. */
11354 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11355 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11356 					   true);
11357 
11358 		/* bd0~bd4 need next flag */
11359 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11360 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11361 	}
11362 
11363 	/* setup bd0, this bd contains offset and read length. */
11364 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11365 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11366 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11367 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11368 
11369 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11370 	if (ret) {
11371 		dev_err(&hdev->pdev->dev,
11372 			"failed to get SFP eeprom info, ret = %d\n", ret);
11373 		return 0;
11374 	}
11375 
11376 	/* copy sfp info from bd0 to out buffer. */
11377 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11378 	memcpy(data, sfp_info_bd0->data, copy_len);
11379 	read_len = copy_len;
11380 
11381 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11382 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11383 		if (read_len >= len)
11384 			return read_len;
11385 
11386 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11387 		memcpy(data + read_len, desc[i].data, copy_len);
11388 		read_len += copy_len;
11389 	}
11390 
11391 	return read_len;
11392 }
11393 
11394 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11395 				   u32 len, u8 *data)
11396 {
11397 	struct hclge_vport *vport = hclge_get_vport(handle);
11398 	struct hclge_dev *hdev = vport->back;
11399 	u32 read_len = 0;
11400 	u16 data_len;
11401 
11402 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11403 		return -EOPNOTSUPP;
11404 
11405 	if (!hclge_module_existed(hdev))
11406 		return -ENXIO;
11407 
11408 	while (read_len < len) {
11409 		data_len = hclge_get_sfp_eeprom_info(hdev,
11410 						     offset + read_len,
11411 						     len - read_len,
11412 						     data + read_len);
11413 		if (!data_len)
11414 			return -EIO;
11415 
11416 		read_len += data_len;
11417 	}
11418 
11419 	return 0;
11420 }
11421 
11422 static const struct hnae3_ae_ops hclge_ops = {
11423 	.init_ae_dev = hclge_init_ae_dev,
11424 	.uninit_ae_dev = hclge_uninit_ae_dev,
11425 	.flr_prepare = hclge_flr_prepare,
11426 	.flr_done = hclge_flr_done,
11427 	.init_client_instance = hclge_init_client_instance,
11428 	.uninit_client_instance = hclge_uninit_client_instance,
11429 	.map_ring_to_vector = hclge_map_ring_to_vector,
11430 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11431 	.get_vector = hclge_get_vector,
11432 	.put_vector = hclge_put_vector,
11433 	.set_promisc_mode = hclge_set_promisc_mode,
11434 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11435 	.set_loopback = hclge_set_loopback,
11436 	.start = hclge_ae_start,
11437 	.stop = hclge_ae_stop,
11438 	.client_start = hclge_client_start,
11439 	.client_stop = hclge_client_stop,
11440 	.get_status = hclge_get_status,
11441 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11442 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11443 	.get_media_type = hclge_get_media_type,
11444 	.check_port_speed = hclge_check_port_speed,
11445 	.get_fec = hclge_get_fec,
11446 	.set_fec = hclge_set_fec,
11447 	.get_rss_key_size = hclge_get_rss_key_size,
11448 	.get_rss_indir_size = hclge_get_rss_indir_size,
11449 	.get_rss = hclge_get_rss,
11450 	.set_rss = hclge_set_rss,
11451 	.set_rss_tuple = hclge_set_rss_tuple,
11452 	.get_rss_tuple = hclge_get_rss_tuple,
11453 	.get_tc_size = hclge_get_tc_size,
11454 	.get_mac_addr = hclge_get_mac_addr,
11455 	.set_mac_addr = hclge_set_mac_addr,
11456 	.do_ioctl = hclge_do_ioctl,
11457 	.add_uc_addr = hclge_add_uc_addr,
11458 	.rm_uc_addr = hclge_rm_uc_addr,
11459 	.add_mc_addr = hclge_add_mc_addr,
11460 	.rm_mc_addr = hclge_rm_mc_addr,
11461 	.set_autoneg = hclge_set_autoneg,
11462 	.get_autoneg = hclge_get_autoneg,
11463 	.restart_autoneg = hclge_restart_autoneg,
11464 	.halt_autoneg = hclge_halt_autoneg,
11465 	.get_pauseparam = hclge_get_pauseparam,
11466 	.set_pauseparam = hclge_set_pauseparam,
11467 	.set_mtu = hclge_set_mtu,
11468 	.reset_queue = hclge_reset_tqp,
11469 	.get_stats = hclge_get_stats,
11470 	.get_mac_stats = hclge_get_mac_stat,
11471 	.update_stats = hclge_update_stats,
11472 	.get_strings = hclge_get_strings,
11473 	.get_sset_count = hclge_get_sset_count,
11474 	.get_fw_version = hclge_get_fw_version,
11475 	.get_mdix_mode = hclge_get_mdix_mode,
11476 	.enable_vlan_filter = hclge_enable_vlan_filter,
11477 	.set_vlan_filter = hclge_set_vlan_filter,
11478 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11479 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11480 	.reset_event = hclge_reset_event,
11481 	.get_reset_level = hclge_get_reset_level,
11482 	.set_default_reset_request = hclge_set_def_reset_request,
11483 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11484 	.set_channels = hclge_set_channels,
11485 	.get_channels = hclge_get_channels,
11486 	.get_regs_len = hclge_get_regs_len,
11487 	.get_regs = hclge_get_regs,
11488 	.set_led_id = hclge_set_led_id,
11489 	.get_link_mode = hclge_get_link_mode,
11490 	.add_fd_entry = hclge_add_fd_entry,
11491 	.del_fd_entry = hclge_del_fd_entry,
11492 	.del_all_fd_entries = hclge_del_all_fd_entries,
11493 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11494 	.get_fd_rule_info = hclge_get_fd_rule_info,
11495 	.get_fd_all_rules = hclge_get_all_rules,
11496 	.enable_fd = hclge_enable_fd,
11497 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11498 	.dbg_run_cmd = hclge_dbg_run_cmd,
11499 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11500 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11501 	.ae_dev_resetting = hclge_ae_dev_resetting,
11502 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11503 	.set_gro_en = hclge_gro_en,
11504 	.get_global_queue_id = hclge_covert_handle_qid_global,
11505 	.set_timer_task = hclge_set_timer_task,
11506 	.mac_connect_phy = hclge_mac_connect_phy,
11507 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11508 	.get_vf_config = hclge_get_vf_config,
11509 	.set_vf_link_state = hclge_set_vf_link_state,
11510 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11511 	.set_vf_trust = hclge_set_vf_trust,
11512 	.set_vf_rate = hclge_set_vf_rate,
11513 	.set_vf_mac = hclge_set_vf_mac,
11514 	.get_module_eeprom = hclge_get_module_eeprom,
11515 	.get_cmdq_stat = hclge_get_cmdq_stat,
11516 };
11517 
11518 static struct hnae3_ae_algo ae_algo = {
11519 	.ops = &hclge_ops,
11520 	.pdev_id_table = ae_algo_pci_tbl,
11521 };
11522 
11523 static int hclge_init(void)
11524 {
11525 	pr_info("%s is initializing\n", HCLGE_NAME);
11526 
11527 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11528 	if (!hclge_wq) {
11529 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11530 		return -ENOMEM;
11531 	}
11532 
11533 	hnae3_register_ae_algo(&ae_algo);
11534 
11535 	return 0;
11536 }
11537 
11538 static void hclge_exit(void)
11539 {
11540 	hnae3_unregister_ae_algo_prepare(&ae_algo);
11541 	hnae3_unregister_ae_algo(&ae_algo);
11542 	destroy_workqueue(hclge_wq);
11543 }
11544 module_init(hclge_init);
11545 module_exit(hclge_exit);
11546 
11547 MODULE_LICENSE("GPL");
11548 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11549 MODULE_DESCRIPTION("HCLGE Driver");
11550 MODULE_VERSION(HCLGE_MOD_VERSION);
11551