// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclgevf_regs.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
#include "hclgevf_trace.h"

#define HCLGEVF_NAME	"hclgevf"

#define HCLGEVF_RESET_MAX_FAIL_CNT	5

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
				  unsigned long delay);

static struct hnae3_ae_algo ae_algovf;

static struct workqueue_struct *hclgevf_wq;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it posts the
 * prefilled descriptors to the queue and cleans the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

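/* Trace helpers for the command queue: one trace event is emitted per
 * descriptor, both when commands are sent and when their completions are
 * read back. For "special" commands only the first descriptor is traced,
 * since the remaining descriptors carry no command data of their own.
 */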
static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				   int num, bool is_special)
{
	int i;

	trace_hclge_vf_cmd_send(hw, desc, 0, num);

	if (is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
}

static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				  int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_vf_cmd_get(hw, desc, 0, num);

	if (is_special)
		return;

	for (i = 1; i < num; i++)
		trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
}

static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
	.trace_cmd_send = hclgevf_trace_cmd_send,
	.trace_cmd_get = hclgevf_trace_cmd_get,
};

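/* hclgevf_arq_init - reset the mailbox async RX queue to an empty state,
 * holding the CRQ lock so the update does not race with mailbox handling.
 */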
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;

	spin_lock(&cmdq->crq.lock);
	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	spin_unlock(&cmdq->crq.lock);
}

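/* Map an hnae3 handle back to its owning hclgevf_dev. The handle may be
 * either the NIC handle or the RoCE handle embedded in the device struct.
 */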
struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
	if (!handle->client)
		return container_of(handle, struct hclgevf_dev, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclgevf_dev, roce);
	else
		return container_of(handle, struct hclgevf_dev, nic);
}

static void hclgevf_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclge_comm_tqps_get_sset_count(handle);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclge_comm_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclge_comm_tqps_get_stats(handle, data);
}

static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
				   u8 subcode)
{
	if (msg) {
		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
		msg->code = code;
		msg->subcode = subcode;
	}
}

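/* Query basic device information from the PF over the mailbox: the
 * hardware TC map, the mailbox API version and the PF capability bits.
 */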
static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
	struct hclge_basic_info *basic_info;
	struct hclge_vf_to_pf_msg send_msg;
	unsigned long caps;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      sizeof(resp_msg));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"failed to get basic info from pf, ret = %d", status);
		return status;
	}

	basic_info = (struct hclge_basic_info *)resp_msg;

	hdev->hw_tc_map = basic_info->hw_tc_map;
	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
	caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
				   sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get port based vlan state failed %d",
			ret);
		return ret;
	}

	nic->port_base_vlan_state = resp_msg;

	return 0;
}

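/* Fetch the queue resources assigned to this VF by the PF: the number of
 * TQPs, the maximum RSS size and the RX buffer length.
 */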
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	6

	struct hclge_mbx_vf_queue_info *queue_info;
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);

	return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp depth info from PF failed %d",
			ret);
		return ret;
	}

	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u16 qid_in_pf = 0;
	u8 resp_data[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
				   sizeof(resp_data));
	if (!ret)
		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);

	return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 resp_msg[2];
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
				   sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to get the pf port media type failed %d",
			ret);
		return ret;
	}

	hdev->hw.mac.media_type = resp_msg[0];
	hdev->hw.mac.module_type = resp_msg[1];

	return 0;
}

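/* Allocate the per-queue (TQP) bookkeeping array and initialize each
 * queue's descriptor counts, doorbell I/O address and, when TX push is
 * supported, its device-memory base.
 */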
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
		 */
		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 i * HCLGEVF_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGEVF_TQP_REG_OFFSET +
					 HCLGEVF_TQP_EXT_REG_OFFSET +
					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
					 HCLGEVF_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

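/* Set up the knic private info of the NIC handle: descriptor counts, TC
 * layout, RSS size and the TQP pointer array, clamped to the number of
 * available NIC MSI-X vectors.
 */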
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	unsigned int i;
	u8 num_tc = 0;

	kinfo = &nic->kinfo;
	kinfo->num_tx_desc = hdev->num_tx_desc;
	kinfo->num_rx_desc = hdev->num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			num_tc++;

	num_tc = num_tc ? num_tc : 1;
	kinfo->tc_info.num_tc = num_tc;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
	new_tqps = kinfo->rss_size * num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	/* after initializing the max rss_size and tqps, adjust the default
	 * number of tqps and the rss size to the actual number of vectors
	 */
	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
				kinfo->rss_size);

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
		return;

	client = handle->client;
	rclient = hdev->roce_client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
	if (link_state != hdev->hw.mac.link) {
		hdev->hw.mac.link = link_state;
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
	}

	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING	0
#define HCLGEVF_SUPPORTED	1

	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
	send_msg.data[0] = HCLGEVF_ADVERTISING;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	send_msg.data[0] = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->flags |= HNAE3_SUPPORT_VF;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
	struct hclge_vf_to_pf_msg send_msg;
	u16 msg_num, hash_key_index;
	u8 index;
	int ret;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
			HCLGEVF_RSS_MBX_RESP_LEN;
	for (index = 0; index < msg_num; index++) {
		send_msg.data[0] = index;
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
		if (index == msg_num - 1)
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0],
			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
		else
			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
	} else {
		if (hfunc)
			*hfunc = ETH_RSS_HASH_TOP;
		if (key) {
			ret = hclgevf_get_rss_hash_key(hdev);
			if (ret)
				return ret;
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGE_COMM_RSS_KEY_SIZE);
		}
	}

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
						  hfunc);
		if (ret)
			return ret;
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 tuple_sets;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

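/* Ask the PF to map (en = true) or unmap (en = false) the rings in
 * @ring_chain to the given vector. The chain is sent in batches of at
 * most HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM rings per mailbox message.
 */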
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	struct hnae3_ring_chain_node *node;
	int status;
	int i = 0;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	send_msg.vector_id = vector_id;

	for (node = ring_chain; node; node = node->next) {
		send_msg.param[i].ring_type =
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);

		send_msg.param[i].tqp_index = node->tqp_index;
		send_msg.param[i].int_gl_index =
					hnae3_get_field(node->int_gl_idx,
							HNAE3_RING_GL_IDX_M,
							HNAE3_RING_GL_IDX_S);

		i++;
		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
			send_msg.ring_num = i;

			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
						      NULL, 0);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret =%d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc,
					bool en_bc_pmc)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	memset(&send_msg, 0, sizeof(send_msg));
	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
	send_msg.en_bc = en_bc_pmc ? 1 : 0;
	send_msg.en_uc = en_uc_pmc ? 1 : 0;
	send_msg.en_mc = en_mc_pmc ? 1 : 0;
	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
					     &handle->priv_flags) ? 1 : 0;

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				    bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	bool en_bc_pmc;

	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	hclgevf_task_schedule(hdev, 0);
}

static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->nic;
	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
	int ret;

	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
		if (!ret)
			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
	}
}

static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
				       u16 stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 host_mac[ETH_ALEN];
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
				      ETH_ALEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"fail to get VF MAC from host %d", status);
		return status;
	}

	ether_addr_copy(p, host_mac);

	return 0;
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 host_mac_addr[ETH_ALEN];

	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
		return;

	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
	if (hdev->has_pf_mac)
		ether_addr_copy(p, host_mac_addr);
	else
		ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	struct hclge_vf_to_pf_msg send_msg;
	u8 *new_mac_addr = (u8 *)p;
	int status;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
	ether_addr_copy(send_msg.data, new_mac_addr);
	if (is_first && !hdev->has_pf_mac)
		eth_zero_addr(&send_msg.data[ETH_ALEN]);
	else
		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGEVF_MAC_TO_ADD:
		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGEVF_MAC_TO_DEL:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * HCLGEVF_MAC_ACTIVE
	 */
	case HCLGEVF_MAC_ACTIVE:
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		break;
	}
}

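/* Record a requested MAC address change in the shadow uc/mc MAC list
 * under the list lock; the actual hardware update is performed later by
 * hclgevf_sync_mac_table().
 */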
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
				   enum HCLGEVF_MAC_NODE_STATE state,
				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
				   const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_mac_addr_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it; just check the mac addr state: convert it to a new
	 * state, remove it, or do nothing.
	 */
	mac_node = hclgevf_find_mac_node(list, addr);
	if (mac_node) {
		hclgevf_update_mac_node(mac_node, state);
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return 0;
	}
	/* if this address was never added, there is no need to delete it */
	if (state == HCLGEVF_MAC_TO_DEL) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
		return -ENOMEM;
	}

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
	return 0;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_UC, addr);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
				       HCLGEVF_MAC_ADDR_MC, addr);
}

static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
				    struct hclgevf_mac_addr_node *mac_node,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vf_to_pf_msg send_msg;
	u8 code, subcode;

	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
		code = HCLGE_MBX_SET_UNICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
	} else {
		code = HCLGE_MBX_SET_MULTICAST;
		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
		else
			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
	}

	hclgevf_build_send_msg(&send_msg, code, subcode);
	ether_addr_copy(send_msg.data, mac_node->mac_addr);
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
				    struct list_head *list,
				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclgevf_mac_addr_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
		if (ret) {
			hnae3_format_mac_addr(format_mac_addr,
					      mac_node->mac_addr);
			dev_err(&hdev->pdev->dev,
				"failed to configure mac %s, state = %d, ret = %d\n",
				format_mac_addr, mac_node->state, ret);
			return;
		}
		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
			mac_node->state = HCLGEVF_MAC_ACTIVE;
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_add_list(struct list_head *add_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received during the
		 * time window of sending the mac config request to the PF.
		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
		 * is removed next time. If it is TO_ADD, the TO_ADD request
		 * failed, so just remove the mac node.
		 */
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclgevf_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
			mac_node->state = HCLGEVF_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

static void hclgevf_sync_from_del_list(struct list_head *del_list,
				       struct list_head *mac_list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the time window
			 * of sending the mac config request to the PF, so
			 * just change the mac state to ACTIVE.
			 */
			new_node->state = HCLGEVF_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclgevf_clear_list(struct list_head *list)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

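/* Flush pending MAC table changes to the PF: move TO_DEL/TO_ADD entries
 * onto temporary lists under the lock, issue the mailbox requests outside
 * the lock, then merge the results back into the shadow list.
 */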
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;

	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGEVF_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGEVF_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;

			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&hdev->mac_table.mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry next time.
	 */
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_sync_from_del_list(&tmp_del_list, list);
	hclgevf_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}

static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->mac_table.mac_list_lock);

	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);

	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}

static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vf_to_pf_msg send_msg;

	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		return -EOPNOTSUPP;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_ENABLE_VLAN_FILTER);
	send_msg.data[0] = enable ? 1 : 0;

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_vlan_filter *vlan_filter;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* When the device is resetting or reset has failed, the firmware is
	 * unable to handle mailbox messages. Just record the vlan id and
	 * remove it after the reset finishes.
	 */
	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
		return -EBUSY;
	} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_FILTER);
	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
	vlan_filter->is_kill = is_kill;
	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));

	/* when removing the hw vlan filter failed, record the vlan id,
	 * and try to remove it from hw later, to be consistent
	 * with the stack.
	 */
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
	if (is_kill && ret)
		set_bit(vlan_id, hdev->vlan_del_fail_bmap);

	return ret;
}

static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT	60
	struct hnae3_handle *handle = &hdev->nic;
	int ret, sync_cnt = 0;
	u16 vlan_id;

	if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
		return;

	rtnl_lock();
	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	while (vlan_id != VLAN_N_VID) {
		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
					      vlan_id, true);
		if (ret)
			break;

		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
		sync_cnt++;
		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
			break;

		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
	}
	rtnl_unlock();
}

static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_VLAN_RX_OFF_CFG);
	send_msg.data[0] = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
	if (ret)
		return ret;

	hdev->rxvtag_strip_en = enable;
	return 0;
}

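/* Disable the VF queues, then request a queue reset from the PF. If the
 * PF reports HCLGEVF_RESET_ALL_QUEUE_DONE for queue 0, all queues were
 * reset in one request; otherwise each remaining queue is reset with its
 * own mailbox request.
 */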
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_vf_to_pf_msg send_msg;
	u8 return_status = 0;
	int ret;
	u16 i;

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(handle, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
			ret);
		return ret;
	}

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);

	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
				   sizeof(return_status));
	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
		return ret;

	for (i = 1; i < handle->kinfo.num_tqps; i++) {
		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
		*(__le16 *)send_msg.data = cpu_to_le16(i);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclge_mbx_mtu_info *mtu_info;
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
	mtu_info->mtu = cpu_to_le32(new_mtu);

	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
	    !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
				      enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	struct hnae3_handle *handle = &hdev->roce;
	int ret;

	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
			type, ret);
	return ret;
}

static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
				      enum hnae3_reset_type reset_type)
{
	/* When an incorrect reset type is executed, the get_reset_level
	 * function generates the HNAE3_NONE_RESET flag. As a result, this
	 * type does not need to be set pending.
	 */
	if (reset_type != HNAE3_NONE_RESET)
		set_bit(reset_type, &hdev->reset_pending);
}

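/* Poll the reset status register until the hardware reports the reset as
 * done, then give the stack extra time to settle (longer for a full VF
 * reset).
 */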
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	if (hdev->reset_type == HNAE3_VF_RESET)
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_VF_RST_ING, val,
					 !(val & HCLGEVF_VF_RST_ING_BIT),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
	else
		ret = readl_poll_timeout(hdev->hw.hw.io_base +
					 HCLGEVF_RST_ING, val,
					 !(val & HCLGEVF_RST_ING_BITS),
					 HCLGEVF_RESET_WAIT_US,
					 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the reset of the stack complete.
	 * This may happen when the reset assertion was made by the PF, which
	 * also means we may end up waiting a bit longer even for a VF reset.
	 */
	if (hdev->reset_type == HNAE3_VF_FULL_RESET)
		msleep(5000);
	else
		msleep(500);

	return 0;
}

static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;

	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
			  reg_val);
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	/* clear handshake status with IMP */
	hclgevf_reset_handshake(hdev, false);

	/* bring up the nic to enable TX/RX again */
	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100

	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
		struct hclge_vf_to_pf_msg send_msg;
		int ret;

		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to assert VF reset, ret = %d\n", ret);
			return ret;
		}
		hdev->rst_stats.vf_func_rst_cnt++;
	}

	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
	/* inform hardware that preparatory work is done */
	msleep(HCLGEVF_RESET_SYNC_TIME);
	hclgevf_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
		 hdev->reset_type);

	return 0;
}

static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
		 hdev->rst_stats.vf_func_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
		 hdev->rst_stats.vf_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.rst_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_rst_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.rst_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.rst_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}

static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
	/* recover handshake status with IMP when reset fails */
	hclgevf_reset_handshake(hdev, true);
	hdev->rst_stats.rst_fail_cnt++;
	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
		hdev->rst_stats.rst_fail_cnt);

	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
		hclgevf_set_reset_pending(hdev, hdev->reset_type);

	if (hclgevf_is_reset_pending(hdev)) {
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		hclgevf_reset_task_schedule(hdev);
	} else {
		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
		hclgevf_dump_rst_info(hdev);
	}
}

static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->rst_stats.rst_cnt++;

	/* perform reset of the stack & ae device for a client */
	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclgevf_reset_prepare_wait(hdev);
}

hclgevf_reset_rebuild(struct hclgevf_dev * hdev)1602 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
1603 {
1604 	int ret;
1605 
1606 	hdev->rst_stats.hw_rst_done_cnt++;
1607 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
1608 	if (ret)
1609 		return ret;
1610 
1611 	rtnl_lock();
1612 	/* now, re-initialize the nic client and ae device */
1613 	ret = hclgevf_reset_stack(hdev);
1614 	rtnl_unlock();
1615 	if (ret) {
1616 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1617 		return ret;
1618 	}
1619 
1620 	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
1621 	/* ignore the RoCE notify error once reset has already failed
1622 	 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times; otherwise propagate it
1623 	 */
1624 	if (ret &&
1625 	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
1626 		return ret;
1627 
1628 	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
1629 	if (ret)
1630 		return ret;
1631 
1632 	hdev->last_reset_time = jiffies;
1633 	hdev->rst_stats.rst_done_cnt++;
1634 	hdev->rst_stats.rst_fail_cnt = 0;
1635 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
1636 
1637 	return 0;
1638 }
1639 
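/* Top-level VF reset flow: prepare (quiesce clients, notify PF), wait
 * for the hardware to report reset completion, then rebuild. Any failure
 * falls through to hclgevf_reset_err_handle().
 */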
1640 static void hclgevf_reset(struct hclgevf_dev *hdev)
1641 {
1642 	if (hclgevf_reset_prepare(hdev))
1643 		goto err_reset;
1644 
1645 	/* check if VF could successfully fetch the hardware reset completion
1646 	 * status from the hardware
1647 	 */
1648 	if (hclgevf_reset_wait(hdev)) {
1649 		/* can't do much in this situation, will disable VF */
1650 		dev_err(&hdev->pdev->dev,
1651 			"failed to fetch H/W reset completion status\n");
1652 		goto err_reset;
1653 	}
1654 
1655 	if (hclgevf_reset_rebuild(hdev))
1656 		goto err_reset;
1657 
1658 	return;
1659 
1660 err_reset:
1661 	hclgevf_reset_err_handle(hdev);
1662 }
1663 
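/* Pick the highest-priority pending reset level and clear the levels it
 * subsumes, e.g. a full VF reset also covers the PF-func and func
 * resets, so their bits are cleared together.
 */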
1664 static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
1665 {
1666 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1667 
1668 	/* return the highest priority reset level amongst all */
1669 	if (test_bit(HNAE3_VF_RESET, addr)) {
1670 		rst_level = HNAE3_VF_RESET;
1671 		clear_bit(HNAE3_VF_RESET, addr);
1672 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1673 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1674 	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1675 		rst_level = HNAE3_VF_FULL_RESET;
1676 		clear_bit(HNAE3_VF_FULL_RESET, addr);
1677 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1678 	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1679 		rst_level = HNAE3_VF_PF_FUNC_RESET;
1680 		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1681 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1682 	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1683 		rst_level = HNAE3_VF_FUNC_RESET;
1684 		clear_bit(HNAE3_VF_FUNC_RESET, addr);
1685 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
1686 		rst_level = HNAE3_FLR_RESET;
1687 		clear_bit(HNAE3_FLR_RESET, addr);
1688 	}
1689 
1690 	clear_bit(HNAE3_NONE_RESET, addr);
1691 
1692 	return rst_level;
1693 }
1694 
1695 static void hclgevf_reset_event(struct pci_dev *pdev,
1696 				struct hnae3_handle *handle)
1697 {
1698 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1699 	struct hclgevf_dev *hdev = ae_dev->priv;
1700 
1701 	if (hdev->default_reset_request)
1702 		hdev->reset_level =
1703 			hclgevf_get_reset_level(&hdev->default_reset_request);
1704 	else
1705 		hdev->reset_level = HNAE3_VF_FUNC_RESET;
1706 
1707 	dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
1708 		 hdev->reset_level);
1709 
1710 	/* reset of this VF requested */
1711 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1712 	hclgevf_reset_task_schedule(hdev);
1713 
1714 	hdev->last_reset_time = jiffies;
1715 }
1716 
1717 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1718 					  enum hnae3_reset_type rst_type)
1719 {
1720 #define HCLGEVF_SUPPORT_RESET_TYPE \
1721 	(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
1722 	BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
1723 	BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
1724 
1725 	struct hclgevf_dev *hdev = ae_dev->priv;
1726 
1727 	if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
1728 		/* To prevent reset triggered by hclge_reset_event */
1729 		set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
1730 		dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
1731 			 rst_type);
1732 		return;
1733 	}
1734 	set_bit(rst_type, &hdev->default_reset_request);
1735 }
1736 
1737 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1738 {
1739 	writel(en ? 1 : 0, vector->addr);
1740 }
1741 
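/* Called through the hnae3 reset_prepare hook (e.g. for FLR). Prepare is
 * retried up to HCLGEVF_RESET_RETRY_CNT times; the reset semaphore is
 * held across a successful prepare and released again in
 * hclgevf_reset_done().
 */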
1742 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
1743 					  enum hnae3_reset_type rst_type)
1744 {
1745 #define HCLGEVF_RESET_RETRY_WAIT_MS	500
1746 #define HCLGEVF_RESET_RETRY_CNT		5
1747 
1748 	struct hclgevf_dev *hdev = ae_dev->priv;
1749 	int retry_cnt = 0;
1750 	int ret;
1751 
1752 	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
1753 		down(&hdev->reset_sem);
1754 		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1755 		hdev->reset_type = rst_type;
1756 		ret = hclgevf_reset_prepare(hdev);
1757 		if (!ret && !hdev->reset_pending)
1758 			break;
1759 
1760 		dev_err(&hdev->pdev->dev,
1761 			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
1762 			ret, hdev->reset_pending, retry_cnt);
1763 		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1764 		up(&hdev->reset_sem);
1765 		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
1766 	}
1767 
1768 	/* disable misc vector before reset done */
1769 	hclgevf_enable_vector(&hdev->misc_vector, false);
1770 
1771 	if (hdev->reset_type == HNAE3_FLR_RESET)
1772 		hdev->rst_stats.flr_rst_cnt++;
1773 }
1774 
1775 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
1776 {
1777 	struct hclgevf_dev *hdev = ae_dev->priv;
1778 	int ret;
1779 
1780 	hclgevf_enable_vector(&hdev->misc_vector, true);
1781 
1782 	ret = hclgevf_reset_rebuild(hdev);
1783 	if (ret)
1784 		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
1785 			 ret);
1786 
1787 	hdev->reset_type = HNAE3_NONE_RESET;
1788 	if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1789 		up(&hdev->reset_sem);
1790 }
1791 
1792 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1793 {
1794 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1795 
1796 	return hdev->fw_version;
1797 }
1798 
1799 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1800 {
1801 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1802 
1803 	vector->vector_irq = pci_irq_vector(hdev->pdev,
1804 					    HCLGEVF_MISC_VECTOR_NUM);
1805 	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1806 	/* vector status always valid for Vector 0 */
1807 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1808 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1809 
1810 	hdev->num_msi_left -= 1;
1811 	hdev->num_msi_used += 1;
1812 }
1813 
1814 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1815 {
1816 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1817 	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
1818 	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
1819 			      &hdev->state))
1820 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1821 }
1822 
1823 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1824 {
1825 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1826 	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
1827 			      &hdev->state))
1828 		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
1829 }
1830 
1831 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
1832 				  unsigned long delay)
1833 {
1834 	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
1835 	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
1836 		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
1837 }
1838 
1839 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
1840 {
1841 #define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
1842 
1843 	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
1844 		return;
1845 
1846 	down(&hdev->reset_sem);
1847 	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1848 
1849 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1850 			       &hdev->reset_state)) {
1851 		/* PF has intimated that it is about to reset the hardware.
1852 		 * We now have to poll & check if hardware has actually
1853 		 * completed the reset sequence. On hardware reset completion,
1854 		 * VF needs to reset the client and ae device.
1855 		 */
1856 		hdev->reset_attempts = 0;
1857 
1858 		hdev->last_reset_time = jiffies;
1859 		hdev->reset_type =
1860 			hclgevf_get_reset_level(&hdev->reset_pending);
1861 		if (hdev->reset_type != HNAE3_NONE_RESET)
1862 			hclgevf_reset(hdev);
1863 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1864 				      &hdev->reset_state)) {
1865 		/* we could be here when either of the below happens:
1866 		 * 1. reset was initiated due to a watchdog timeout caused by
1867 		 *    a. IMP was earlier reset and our TX got choked down,
1868 		 *       which made the watchdog react and induce a VF reset.
1869 		 *       This also means our cmdq would be unreliable.
1870 		 *    b. a problem in TX due to some other lower layer (e.g. the
1871 		 *       link layer not functioning properly).
1872 		 * 2. VF reset might have been initiated due to some config
1873 		 *    change.
1874 		 *
1875 		 * NOTE: There is no clearer way to detect the above cases than
1876 		 * to react to the PF's response to this reset request. The PF
1877 		 * will ack cases 1b and 2, but we will get no intimation about
1878 		 * 1a from the PF, as the cmdq would be in an unreliable state,
1879 		 * i.e. mailbox communication between PF and VF would be broken.
1880 		 *
1881 		 * If we never get into the pending state it means either:
1882 		 * 1. PF is not receiving our request, which could be due to an
1883 		 *    IMP reset
1884 		 * 2. PF is in a bad state
1885 		 * We cannot do much for 2, but for 1 we can first try resetting
1886 		 * our PCIe + stack and see if it alleviates the problem.
1887 		 */
1888 		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
1889 			/* prepare for full reset of stack + pcie interface */
1890 			hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
1891 
1892 			/* "defer" schedule the reset task again */
1893 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1894 		} else {
1895 			hdev->reset_attempts++;
1896 
1897 			hclgevf_set_reset_pending(hdev, hdev->reset_level);
1898 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1899 		}
1900 		hclgevf_reset_task_schedule(hdev);
1901 	}
1902 
1903 	hdev->reset_type = HNAE3_NONE_RESET;
1904 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1905 	up(&hdev->reset_sem);
1906 }
1907 
1908 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
1909 {
1910 	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
1911 		return;
1912 
1913 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1914 		return;
1915 
1916 	hclgevf_mbx_async_handler(hdev);
1917 
1918 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1919 }
1920 
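/* Periodic keep-alive: sent from the service task (every
 * HCLGEVF_KEEP_ALIVE_TASK_INTERVAL rounds, see below) so the PF knows
 * this VF is still alive; skipped while the cmdq is disabled.
 */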
1921 static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
1922 {
1923 	struct hclge_vf_to_pf_msg send_msg;
1924 	int ret;
1925 
1926 	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1927 		return;
1928 
1929 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
1930 	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
1931 	if (ret)
1932 		dev_err(&hdev->pdev->dev,
1933 			"VF sends keep alive cmd failed(=%d)\n", ret);
1934 }
1935 
1936 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
1937 {
1938 	unsigned long delta = round_jiffies_relative(HZ);
1939 	struct hnae3_handle *handle = &hdev->nic;
1940 
1941 	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
1942 	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
1943 		return;
1944 
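	/* Throttle to roughly one pass per second: if the last pass ran
	 * less than HZ ago, reschedule for the remaining time instead of
	 * doing the work again.
	 */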
1945 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
1946 		delta = jiffies - hdev->last_serv_processed;
1947 
1948 		if (delta < round_jiffies_relative(HZ)) {
1949 			delta = round_jiffies_relative(HZ) - delta;
1950 			goto out;
1951 		}
1952 	}
1953 
1954 	hdev->serv_processed_cnt++;
1955 	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
1956 		hclgevf_keep_alive(hdev);
1957 
1958 	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
1959 		hdev->last_serv_processed = jiffies;
1960 		goto out;
1961 	}
1962 
1963 	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
1964 		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
1965 
1966 	/* The VF does not need to request the link status when this bit is
1967 	 * set: the PF pushes its link status to VFs whenever it changes.
1968 	 */
1969 	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
1970 		hclgevf_request_link_info(hdev);
1971 
1972 	hclgevf_update_link_mode(hdev);
1973 
1974 	hclgevf_sync_vlan_filter(hdev);
1975 
1976 	hclgevf_sync_mac_table(hdev);
1977 
1978 	hclgevf_sync_promisc_mode(hdev);
1979 
1980 	hdev->last_serv_processed = jiffies;
1981 
1982 out:
1983 	hclgevf_task_schedule(hdev, delta);
1984 }
1985 
1986 static void hclgevf_service_task(struct work_struct *work)
1987 {
1988 	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
1989 						service_task.work);
1990 
1991 	hclgevf_reset_service_task(hdev);
1992 	hclgevf_mailbox_service_task(hdev);
1993 	hclgevf_periodic_service_task(hdev);
1994 
1995 	/* Handle reset and mbx again in case periodical task delays the
1996 	 * handling by calling hclgevf_task_schedule() in
1997 	 * hclgevf_periodic_service_task()
1998 	 */
1999 	hclgevf_reset_service_task(hdev);
2000 	hclgevf_mailbox_service_task(hdev);
2001 }
2002 
2003 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
2004 {
2005 	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
2006 }
2007 
2008 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
2009 						      u32 *clearval)
2010 {
2011 	u32 val, cmdq_stat_reg, rst_ing_reg;
2012 
2013 	/* fetch the events from their corresponding regs */
2014 	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
2015 					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
2016 	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
2017 		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2018 		dev_info(&hdev->pdev->dev,
2019 			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
2020 		hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
2021 		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
2022 		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
2023 		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
2024 		hdev->rst_stats.vf_rst_cnt++;
2025 		/* set up the VF hardware reset status; the PF will clear
2026 		 * this status once it has finished initializing.
2027 		 */
2028 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
2029 		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
2030 				  val | HCLGEVF_VF_RST_ING_BIT);
2031 		return HCLGEVF_VECTOR0_EVENT_RST;
2032 	}
2033 
2034 	/* check for vector0 mailbox(=CMDQ RX) event source */
2035 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
2036 		/* For revision 0x21, writing 0 to a bit of the clear
2037 		 * register clears the interrupt, while writing 1 keeps
2038 		 * the old value.
2039 		 * For revision 0x20, the clear register is a read & write
2040 		 * register, so we should write 0 only to the bit we are
2041 		 * handling, and keep the other bits as in cmdq_stat_reg.
2042 		 */
2043 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2044 			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2045 		else
2046 			*clearval = cmdq_stat_reg &
2047 				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
2048 
2049 		return HCLGEVF_VECTOR0_EVENT_MBX;
2050 	}
2051 
2052 	/* print other vector0 event source */
2053 	dev_info(&hdev->pdev->dev,
2054 		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
2055 		 cmdq_stat_reg);
2056 
2057 	return HCLGEVF_VECTOR0_EVENT_OTHER;
2058 }
2059 
2060 static void hclgevf_reset_timer(struct timer_list *t)
2061 {
2062 	struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
2063 
2064 	hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
2065 	hclgevf_reset_task_schedule(hdev);
2066 }
2067 
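/* Misc (vector 0) interrupt handler: the vector is masked for the
 * duration of handling. Reset events are deferred to the reset timer
 * (HCLGEVF_RESET_DELAY ms) rather than handled in IRQ context, while
 * mailbox events are handled directly.
 */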
2068 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
2069 {
2070 #define HCLGEVF_RESET_DELAY	5
2071 
2072 	enum hclgevf_evt_cause event_cause;
2073 	struct hclgevf_dev *hdev = data;
2074 	u32 clearval;
2075 
2076 	hclgevf_enable_vector(&hdev->misc_vector, false);
2077 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
2078 	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
2079 		hclgevf_clear_event_cause(hdev, clearval);
2080 
2081 	switch (event_cause) {
2082 	case HCLGEVF_VECTOR0_EVENT_RST:
2083 		mod_timer(&hdev->reset_timer,
2084 			  jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
2085 		break;
2086 	case HCLGEVF_VECTOR0_EVENT_MBX:
2087 		hclgevf_mbx_handler(hdev);
2088 		break;
2089 	default:
2090 		break;
2091 	}
2092 
2093 	hclgevf_enable_vector(&hdev->misc_vector, true);
2094 
2095 	return IRQ_HANDLED;
2096 }
2097 
2098 static int hclgevf_configure(struct hclgevf_dev *hdev)
2099 {
2100 	int ret;
2101 
2102 	hdev->gro_en = true;
2103 
2104 	ret = hclgevf_get_basic_info(hdev);
2105 	if (ret)
2106 		return ret;
2107 
2108 	/* get current port based vlan state from PF */
2109 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2110 	if (ret)
2111 		return ret;
2112 
2113 	/* get queue configuration from PF */
2114 	ret = hclgevf_get_queue_info(hdev);
2115 	if (ret)
2116 		return ret;
2117 
2118 	/* get queue depth info from PF */
2119 	ret = hclgevf_get_queue_depth(hdev);
2120 	if (ret)
2121 		return ret;
2122 
2123 	return hclgevf_get_pf_media_type(hdev);
2124 }
2125 
2126 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
2127 {
2128 	struct pci_dev *pdev = ae_dev->pdev;
2129 	struct hclgevf_dev *hdev;
2130 
2131 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
2132 	if (!hdev)
2133 		return -ENOMEM;
2134 
2135 	hdev->pdev = pdev;
2136 	hdev->ae_dev = ae_dev;
2137 	ae_dev->priv = hdev;
2138 
2139 	return 0;
2140 }
2141 
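/* Fill in the RoCE client's handle from the shared VF resources: the
 * RoCE vectors start at roce_base_msix_offset, after the NIC vectors,
 * and the io/mem bases are shared with the NIC.
 */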
2142 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
2143 {
2144 	struct hnae3_handle *roce = &hdev->roce;
2145 	struct hnae3_handle *nic = &hdev->nic;
2146 
2147 	roce->rinfo.num_vectors = hdev->num_roce_msix;
2148 
2149 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
2150 	    hdev->num_msi_left == 0)
2151 		return -EINVAL;
2152 
2153 	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
2154 
2155 	roce->rinfo.netdev = nic->kinfo.netdev;
2156 	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2157 	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2158 
2159 	roce->pdev = nic->pdev;
2160 	roce->ae_algo = nic->ae_algo;
2161 	bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
2162 		    MAX_NUMNODES);
2163 	return 0;
2164 }
2165 
2166 static int hclgevf_config_gro(struct hclgevf_dev *hdev)
2167 {
2168 	struct hclgevf_cfg_gro_status_cmd *req;
2169 	struct hclge_desc desc;
2170 	int ret;
2171 
2172 	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
2173 		return 0;
2174 
2175 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
2176 				     false);
2177 	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
2178 
2179 	req->gro_en = hdev->gro_en ? 1 : 0;
2180 
2181 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2182 	if (ret)
2183 		dev_err(&hdev->pdev->dev,
2184 			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
2185 
2186 	return ret;
2187 }
2188 
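/* Program the RSS configuration into hardware. The hash key and input
 * tuple are only configurable on dev_version >= V2; on older devices
 * only the indirection table and TC mode are set here.
 */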
2189 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
2190 {
2191 	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
2192 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
2193 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
2194 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
2195 	int ret;
2196 
2197 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2198 		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
2199 						  rss_cfg->rss_algo,
2200 						  rss_cfg->rss_hash_key);
2201 		if (ret)
2202 			return ret;
2203 
2204 		ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
2205 		if (ret)
2206 			return ret;
2207 	}
2208 
2209 	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
2210 					     rss_cfg->rss_indirection_tbl);
2211 	if (ret)
2212 		return ret;
2213 
2214 	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
2215 				   tc_offset, tc_valid, tc_size);
2216 
2217 	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
2218 					  tc_valid, tc_size);
2219 }
2220 
2221 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
2222 				    bool rxvtag_strip_en)
2223 {
2224 	struct hnae3_handle *nic = &hdev->nic;
2225 	int ret;
2226 
2227 	ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
2228 	if (ret) {
2229 		dev_err(&hdev->pdev->dev,
2230 			"failed to enable rx vlan offload, ret = %d\n", ret);
2231 		return ret;
2232 	}
2233 
2234 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2235 				       false);
2236 }
2237 
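/* Busy-wait until any in-flight link update in the service task has
 * finished, bounded by HCLGEVF_FLUSH_LINK_TIMEOUT iterations or a new
 * service-task pass.
 */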
2238 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
2239 {
2240 #define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
2241 
2242 	unsigned long last = hdev->serv_processed_cnt;
2243 	int i = 0;
2244 
2245 	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
2246 	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
2247 	       last == hdev->serv_processed_cnt)
2248 		usleep_range(1, 1);
2249 }
2250 
2251 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2252 {
2253 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2254 
2255 	if (enable) {
2256 		hclgevf_task_schedule(hdev, 0);
2257 	} else {
2258 		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2259 
2260 		smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
2261 		hclgevf_flush_link_update(hdev);
2262 	}
2263 }
2264 
2265 static int hclgevf_ae_start(struct hnae3_handle *handle)
2266 {
2267 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2268 
2269 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2270 	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
2271 
2272 	hclge_comm_reset_tqp_stats(handle);
2273 
2274 	hclgevf_request_link_info(hdev);
2275 
2276 	hclgevf_update_link_mode(hdev);
2277 
2278 	return 0;
2279 }
2280 
2281 static void hclgevf_ae_stop(struct hnae3_handle *handle)
2282 {
2283 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2284 
2285 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2286 
2287 	if (hdev->reset_type != HNAE3_VF_RESET)
2288 		hclgevf_reset_tqp(handle);
2289 
2290 	hclge_comm_reset_tqp_stats(handle);
2291 	hclgevf_update_link_status(hdev, 0);
2292 }
2293 
2294 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2295 {
2296 #define HCLGEVF_STATE_ALIVE	1
2297 #define HCLGEVF_STATE_NOT_ALIVE	0
2298 
2299 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2300 	struct hclge_vf_to_pf_msg send_msg;
2301 
2302 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
2303 	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
2304 				HCLGEVF_STATE_NOT_ALIVE;
2305 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2306 }
2307 
2308 static int hclgevf_client_start(struct hnae3_handle *handle)
2309 {
2310 	return hclgevf_set_alive(handle, true);
2311 }
2312 
2313 static void hclgevf_client_stop(struct hnae3_handle *handle)
2314 {
2315 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2316 	int ret;
2317 
2318 	ret = hclgevf_set_alive(handle, false);
2319 	if (ret)
2320 		dev_warn(&hdev->pdev->dev,
2321 			 "%s failed %d\n", __func__, ret);
2322 }
2323 
2324 static void hclgevf_state_init(struct hclgevf_dev *hdev)
2325 {
2326 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2327 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2328 	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
2329 
2330 	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
2331 	/* timer needs to be initialized before misc irq */
2332 	timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
2333 
2334 	mutex_init(&hdev->mbx_resp.mbx_mutex);
2335 	sema_init(&hdev->reset_sem, 1);
2336 
2337 	spin_lock_init(&hdev->mac_table.mac_list_lock);
2338 	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
2339 	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
2340 
2341 	/* bring the device down */
2342 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2343 }
2344 
2345 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2346 {
2347 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2348 	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
2349 
2350 	if (hdev->service_task.work.func)
2351 		cancel_delayed_work_sync(&hdev->service_task);
2352 
2353 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2354 }
2355 
2356 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2357 {
2358 	struct pci_dev *pdev = hdev->pdev;
2359 	int vectors;
2360 	int i;
2361 
2362 	if (hnae3_dev_roce_supported(hdev))
2363 		vectors = pci_alloc_irq_vectors(pdev,
2364 						hdev->roce_base_msix_offset + 1,
2365 						hdev->num_msi,
2366 						PCI_IRQ_MSIX);
2367 	else
2368 		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2369 						hdev->num_msi,
2370 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
2371 
2372 	if (vectors < 0) {
2373 		dev_err(&pdev->dev,
2374 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2375 			vectors);
2376 		return vectors;
2377 	}
2378 	if (vectors < hdev->num_msi)
2379 		dev_warn(&hdev->pdev->dev,
2380 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2381 			 hdev->num_msi, vectors);
2382 
2383 	hdev->num_msi = vectors;
2384 	hdev->num_msi_left = vectors;
2385 
2386 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2387 					   sizeof(u16), GFP_KERNEL);
2388 	if (!hdev->vector_status) {
2389 		pci_free_irq_vectors(pdev);
2390 		return -ENOMEM;
2391 	}
2392 
2393 	for (i = 0; i < hdev->num_msi; i++)
2394 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2395 
2396 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2397 					sizeof(int), GFP_KERNEL);
2398 	if (!hdev->vector_irq) {
2399 		devm_kfree(&pdev->dev, hdev->vector_status);
2400 		pci_free_irq_vectors(pdev);
2401 		return -ENOMEM;
2402 	}
2403 
2404 	return 0;
2405 }
2406 
2407 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2408 {
2409 	struct pci_dev *pdev = hdev->pdev;
2410 
2411 	devm_kfree(&pdev->dev, hdev->vector_status);
2412 	devm_kfree(&pdev->dev, hdev->vector_irq);
2413 	pci_free_irq_vectors(pdev);
2414 }
2415 
2416 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2417 {
2418 	int ret;
2419 
2420 	hclgevf_get_misc_vector(hdev);
2421 
2422 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
2423 		 HCLGEVF_NAME, pci_name(hdev->pdev));
2424 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2425 			  0, hdev->misc_vector.name, hdev);
2426 	if (ret) {
2427 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2428 			hdev->misc_vector.vector_irq);
2429 		return ret;
2430 	}
2431 
2432 	hclgevf_clear_event_cause(hdev, 0);
2433 
2434 	/* enable misc. vector(vector 0) */
2435 	hclgevf_enable_vector(&hdev->misc_vector, true);
2436 
2437 	return ret;
2438 }
2439 
2440 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2441 {
2442 	/* disable misc vector(vector 0) */
2443 	hclgevf_enable_vector(&hdev->misc_vector, false);
2444 	synchronize_irq(hdev->misc_vector.vector_irq);
2445 	free_irq(hdev->misc_vector.vector_irq, hdev);
2446 	hclgevf_free_vector(hdev, 0);
2447 }
2448 
2449 static void hclgevf_info_show(struct hclgevf_dev *hdev)
2450 {
2451 	struct device *dev = &hdev->pdev->dev;
2452 
2453 	dev_info(dev, "VF info begin:\n");
2454 
2455 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
2456 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
2457 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
2458 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
2459 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
2460 	dev_info(dev, "PF media type of this VF: %u\n",
2461 		 hdev->hw.mac.media_type);
2462 
2463 	dev_info(dev, "VF info end.\n");
2464 }
2465 
2466 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
2467 					    struct hnae3_client *client)
2468 {
2469 	struct hclgevf_dev *hdev = ae_dev->priv;
2470 	int rst_cnt = hdev->rst_stats.rst_cnt;
2471 	int ret;
2472 
2473 	ret = client->ops->init_instance(&hdev->nic);
2474 	if (ret)
2475 		return ret;
2476 
2477 	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2478 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
2479 	    rst_cnt != hdev->rst_stats.rst_cnt) {
2480 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2481 
2482 		client->ops->uninit_instance(&hdev->nic, 0);
2483 		return -EBUSY;
2484 	}
2485 
2486 	hnae3_set_client_init_flag(client, ae_dev, 1);
2487 
2488 	if (netif_msg_drv(&hdev->nic))
2489 		hclgevf_info_show(hdev);
2490 
2491 	return 0;
2492 }
2493 
2494 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
2495 					     struct hnae3_client *client)
2496 {
2497 	struct hclgevf_dev *hdev = ae_dev->priv;
2498 	int ret;
2499 
2500 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
2501 	    !hdev->nic_client)
2502 		return 0;
2503 
2504 	ret = hclgevf_init_roce_base_info(hdev);
2505 	if (ret)
2506 		return ret;
2507 
2508 	ret = client->ops->init_instance(&hdev->roce);
2509 	if (ret)
2510 		return ret;
2511 
2512 	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2513 	hnae3_set_client_init_flag(client, ae_dev, 1);
2514 
2515 	return 0;
2516 }
2517 
2518 static int hclgevf_init_client_instance(struct hnae3_client *client,
2519 					struct hnae3_ae_dev *ae_dev)
2520 {
2521 	struct hclgevf_dev *hdev = ae_dev->priv;
2522 	int ret;
2523 
2524 	switch (client->type) {
2525 	case HNAE3_CLIENT_KNIC:
2526 		hdev->nic_client = client;
2527 		hdev->nic.client = client;
2528 
2529 		ret = hclgevf_init_nic_client_instance(ae_dev, client);
2530 		if (ret)
2531 			goto clear_nic;
2532 
2533 		ret = hclgevf_init_roce_client_instance(ae_dev,
2534 							hdev->roce_client);
2535 		if (ret)
2536 			goto clear_roce;
2537 
2538 		break;
2539 	case HNAE3_CLIENT_ROCE:
2540 		if (hnae3_dev_roce_supported(hdev)) {
2541 			hdev->roce_client = client;
2542 			hdev->roce.client = client;
2543 		}
2544 
2545 		ret = hclgevf_init_roce_client_instance(ae_dev, client);
2546 		if (ret)
2547 			goto clear_roce;
2548 
2549 		break;
2550 	default:
2551 		return -EINVAL;
2552 	}
2553 
2554 	return 0;
2555 
2556 clear_nic:
2557 	hdev->nic_client = NULL;
2558 	hdev->nic.client = NULL;
2559 	return ret;
2560 clear_roce:
2561 	hdev->roce_client = NULL;
2562 	hdev->roce.client = NULL;
2563 	return ret;
2564 }
2565 
2566 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2567 					   struct hnae3_ae_dev *ae_dev)
2568 {
2569 	struct hclgevf_dev *hdev = ae_dev->priv;
2570 
2571 	/* un-init roce, if it exists */
2572 	if (hdev->roce_client) {
2573 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2574 			msleep(HCLGEVF_WAIT_RESET_DONE);
2575 		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
2576 
2577 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2578 		hdev->roce_client = NULL;
2579 		hdev->roce.client = NULL;
2580 	}
2581 
2582 	/* un-init nic/unic, if this was not called by roce client */
2583 	if (client->ops->uninit_instance && hdev->nic_client &&
2584 	    client->type != HNAE3_CLIENT_ROCE) {
2585 		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
2586 			msleep(HCLGEVF_WAIT_RESET_DONE);
2587 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
2588 
2589 		client->ops->uninit_instance(&hdev->nic, 0);
2590 		hdev->nic_client = NULL;
2591 		hdev->nic.client = NULL;
2592 	}
2593 }
2594 
2595 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
2596 {
2597 	struct pci_dev *pdev = hdev->pdev;
2598 	struct hclgevf_hw *hw = &hdev->hw;
2599 
2600 	/* for a device that has no device memory, return directly */
2601 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
2602 		return 0;
2603 
2604 	hw->hw.mem_base =
2605 		devm_ioremap_wc(&pdev->dev,
2606 				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
2607 				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
2608 	if (!hw->hw.mem_base) {
2609 		dev_err(&pdev->dev, "failed to map device memory\n");
2610 		return -EFAULT;
2611 	}
2612 
2613 	return 0;
2614 }
2615 
2616 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2617 {
2618 	struct pci_dev *pdev = hdev->pdev;
2619 	struct hclgevf_hw *hw;
2620 	int ret;
2621 
2622 	ret = pci_enable_device(pdev);
2623 	if (ret) {
2624 		dev_err(&pdev->dev, "failed to enable PCI device\n");
2625 		return ret;
2626 	}
2627 
2628 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2629 	if (ret) {
2630 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
2631 		goto err_disable_device;
2632 	}
2633 
2634 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2635 	if (ret) {
2636 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2637 		goto err_disable_device;
2638 	}
2639 
2640 	pci_set_master(pdev);
2641 	hw = &hdev->hw;
2642 	hw->hw.io_base = pci_iomap(pdev, 2, 0);
2643 	if (!hw->hw.io_base) {
2644 		dev_err(&pdev->dev, "can't map configuration register space\n");
2645 		ret = -ENOMEM;
2646 		goto err_release_regions;
2647 	}
2648 
2649 	ret = hclgevf_dev_mem_map(hdev);
2650 	if (ret)
2651 		goto err_unmap_io_base;
2652 
2653 	return 0;
2654 
2655 err_unmap_io_base:
2656 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2657 err_release_regions:
2658 	pci_release_regions(pdev);
2659 err_disable_device:
2660 	pci_disable_device(pdev);
2661 
2662 	return ret;
2663 }
2664 
2665 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2666 {
2667 	struct pci_dev *pdev = hdev->pdev;
2668 
2669 	if (hdev->hw.hw.mem_base)
2670 		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
2671 
2672 	pci_iounmap(pdev, hdev->hw.hw.io_base);
2673 	pci_release_regions(pdev);
2674 	pci_disable_device(pdev);
2675 }
2676 
2677 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2678 {
2679 	struct hclgevf_query_res_cmd *req;
2680 	struct hclge_desc desc;
2681 	int ret;
2682 
2683 	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
2684 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2685 	if (ret) {
2686 		dev_err(&hdev->pdev->dev,
2687 			"query vf resource failed, ret = %d.\n", ret);
2688 		return ret;
2689 	}
2690 
2691 	req = (struct hclgevf_query_res_cmd *)desc.data;
2692 
2693 	if (hnae3_dev_roce_supported(hdev)) {
2694 		hdev->roce_base_msix_offset =
2695 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
2696 				HCLGEVF_MSIX_OFT_ROCEE_M,
2697 				HCLGEVF_MSIX_OFT_ROCEE_S);
2698 		hdev->num_roce_msix =
2699 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2700 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2701 
2702 		/* the nic's msix number always equals the roce's. */
2703 		hdev->num_nic_msix = hdev->num_roce_msix;
2704 
2705 		/* The VF should have NIC vectors and RoCE vectors; NIC vectors
2706 		 * are queued before RoCE vectors. The offset is fixed to 64.
2707 		 */
2708 		hdev->num_msi = hdev->num_roce_msix +
2709 				hdev->roce_base_msix_offset;
2710 	} else {
2711 		hdev->num_msi =
2712 		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
2713 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2714 
2715 		hdev->num_nic_msix = hdev->num_msi;
2716 	}
2717 
2718 	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
2719 		dev_err(&hdev->pdev->dev,
2720 			"Just %u msi resources, not enough for vf(min:2).\n",
2721 			hdev->num_nic_msix);
2722 		return -EINVAL;
2723 	}
2724 
2725 	return 0;
2726 }
2727 
2728 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
2729 {
2730 #define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
2731 
2732 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2733 
2734 	ae_dev->dev_specs.max_non_tso_bd_num =
2735 					HCLGEVF_MAX_NON_TSO_BD_NUM;
2736 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2737 	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2738 	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2739 	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2740 }
2741 
2742 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
2743 				    struct hclge_desc *desc)
2744 {
2745 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2746 	struct hclgevf_dev_specs_0_cmd *req0;
2747 	struct hclgevf_dev_specs_1_cmd *req1;
2748 
2749 	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
2750 	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
2751 
2752 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
2753 	ae_dev->dev_specs.rss_ind_tbl_size =
2754 					le16_to_cpu(req0->rss_ind_tbl_size);
2755 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
2756 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
2757 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
2758 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
2759 }
2760 
2761 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
2762 {
2763 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
2764 
2765 	if (!dev_specs->max_non_tso_bd_num)
2766 		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
2767 	if (!dev_specs->rss_ind_tbl_size)
2768 		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
2769 	if (!dev_specs->rss_key_size)
2770 		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
2771 	if (!dev_specs->max_int_gl)
2772 		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
2773 	if (!dev_specs->max_frm_size)
2774 		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
2775 }
2776 
2777 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
2778 {
2779 	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
2780 	int ret;
2781 	int i;
2782 
2783 	/* set default specifications as devices lower than version V3 do not
2784 	 * support querying specifications from firmware.
2785 	 */
2786 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
2787 		hclgevf_set_default_dev_specs(hdev);
2788 		return 0;
2789 	}
2790 
2791 	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
2792 		hclgevf_cmd_setup_basic_desc(&desc[i],
2793 					     HCLGE_OPC_QUERY_DEV_SPECS, true);
2794 		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2795 	}
2796 	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
2797 
2798 	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
2799 	if (ret)
2800 		return ret;
2801 
2802 	hclgevf_parse_dev_specs(hdev, desc);
2803 	hclgevf_check_dev_specs(hdev);
2804 
2805 	return 0;
2806 }
2807 
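/* For FLR and full VF resets the vector/IRQ setup is torn down and
 * re-initialized, since the reset is expected to invalidate it; for
 * other reset types the existing IRQ setup is kept.
 */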
2808 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2809 {
2810 	struct pci_dev *pdev = hdev->pdev;
2811 	int ret = 0;
2812 
2813 	if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
2814 	     hdev->reset_type == HNAE3_FLR_RESET) &&
2815 	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2816 		hclgevf_misc_irq_uninit(hdev);
2817 		hclgevf_uninit_msi(hdev);
2818 		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2819 	}
2820 
2821 	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2822 		pci_set_master(pdev);
2823 		ret = hclgevf_init_msi(hdev);
2824 		if (ret) {
2825 			dev_err(&pdev->dev,
2826 				"failed(%d) to init MSI/MSI-X\n", ret);
2827 			return ret;
2828 		}
2829 
2830 		ret = hclgevf_misc_irq_init(hdev);
2831 		if (ret) {
2832 			hclgevf_uninit_msi(hdev);
2833 			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2834 				ret);
2835 			return ret;
2836 		}
2837 
2838 		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2839 	}
2840 
2841 	return ret;
2842 }
2843 
2844 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
2845 {
2846 	struct hclge_vf_to_pf_msg send_msg;
2847 
2848 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
2849 			       HCLGE_MBX_VPORT_LIST_CLEAR);
2850 	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
2851 }
2852 
2853 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
2854 {
2855 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2856 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
2857 }
2858 
2859 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
2860 {
2861 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
2862 		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
2863 }
2864 
2865 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2866 {
2867 	struct pci_dev *pdev = hdev->pdev;
2868 	int ret;
2869 
2870 	ret = hclgevf_pci_reset(hdev);
2871 	if (ret) {
2872 		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2873 		return ret;
2874 	}
2875 
2876 	hclgevf_arq_init(hdev);
2877 
2878 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2879 				  &hdev->fw_version, false,
2880 				  hdev->reset_pending);
2881 	if (ret) {
2882 		dev_err(&pdev->dev, "cmd failed %d\n", ret);
2883 		return ret;
2884 	}
2885 
2886 	ret = hclgevf_rss_init_hw(hdev);
2887 	if (ret) {
2888 		dev_err(&hdev->pdev->dev,
2889 			"failed(%d) to initialize RSS\n", ret);
2890 		return ret;
2891 	}
2892 
2893 	ret = hclgevf_config_gro(hdev);
2894 	if (ret)
2895 		return ret;
2896 
2897 	ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
2898 	if (ret) {
2899 		dev_err(&hdev->pdev->dev,
2900 			"failed(%d) to initialize VLAN config\n", ret);
2901 		return ret;
2902 	}
2903 
2904 	/* get current port based vlan state from PF */
2905 	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
2906 	if (ret)
2907 		return ret;
2908 
2909 	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
2910 
2911 	hclgevf_init_rxd_adv_layout(hdev);
2912 
2913 	dev_info(&hdev->pdev->dev, "Reset done\n");
2914 
2915 	return 0;
2916 }
2917 
2918 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2919 {
2920 	struct pci_dev *pdev = hdev->pdev;
2921 	int ret;
2922 
2923 	ret = hclgevf_pci_init(hdev);
2924 	if (ret)
2925 		return ret;
2926 
2927 	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
2928 	if (ret)
2929 		goto err_cmd_queue_init;
2930 
2931 	hclgevf_arq_init(hdev);
2932 
2933 	hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
2934 	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
2935 				  &hdev->fw_version, false,
2936 				  hdev->reset_pending);
2937 	if (ret)
2938 		goto err_cmd_init;
2939 
2940 	/* Get vf resource */
2941 	ret = hclgevf_query_vf_resource(hdev);
2942 	if (ret)
2943 		goto err_cmd_init;
2944 
2945 	ret = hclgevf_query_dev_specs(hdev);
2946 	if (ret) {
2947 		dev_err(&pdev->dev,
2948 			"failed to query dev specifications, ret = %d\n", ret);
2949 		goto err_cmd_init;
2950 	}
2951 
2952 	ret = hclgevf_init_msi(hdev);
2953 	if (ret) {
2954 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2955 		goto err_cmd_init;
2956 	}
2957 
2958 	hclgevf_state_init(hdev);
2959 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
2960 	hdev->reset_type = HNAE3_NONE_RESET;
2961 
2962 	ret = hclgevf_misc_irq_init(hdev);
2963 	if (ret)
2964 		goto err_misc_irq_init;
2965 
2966 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2967 
2968 	ret = hclgevf_configure(hdev);
2969 	if (ret) {
2970 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2971 		goto err_config;
2972 	}
2973 
2974 	ret = hclgevf_alloc_tqps(hdev);
2975 	if (ret) {
2976 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2977 		goto err_config;
2978 	}
2979 
2980 	ret = hclgevf_set_handle_info(hdev);
2981 	if (ret)
2982 		goto err_config;
2983 
2984 	ret = hclgevf_config_gro(hdev);
2985 	if (ret)
2986 		goto err_config;
2987 
2988 	/* Initialize RSS for this VF */
2989 	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
2990 				      &hdev->rss_cfg);
2991 	if (ret) {
2992 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
2993 		goto err_config;
2994 	}
2995 
2996 	ret = hclgevf_rss_init_hw(hdev);
2997 	if (ret) {
2998 		dev_err(&hdev->pdev->dev,
2999 			"failed(%d) to initialize RSS\n", ret);
3000 		goto err_config;
3001 	}
3002 
3003 	/* ensure the vf tbl list is empty before init */
3004 	ret = hclgevf_clear_vport_list(hdev);
3005 	if (ret) {
3006 		dev_err(&pdev->dev,
3007 			"failed to clear tbl list configuration, ret = %d.\n",
3008 			ret);
3009 		goto err_config;
3010 	}
3011 
3012 	ret = hclgevf_init_vlan_config(hdev, true);
3013 	if (ret) {
3014 		dev_err(&hdev->pdev->dev,
3015 			"failed(%d) to initialize VLAN config\n", ret);
3016 		goto err_config;
3017 	}
3018 
3019 	hclgevf_init_rxd_adv_layout(hdev);
3020 
3021 	ret = hclgevf_devlink_init(hdev);
3022 	if (ret)
3023 		goto err_config;
3024 
3025 	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
3026 
3027 	hdev->last_reset_time = jiffies;
3028 	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
3029 		 HCLGEVF_DRIVER_NAME);
3030 
3031 	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
3032 
3033 	return 0;
3034 
3035 err_config:
3036 	hclgevf_misc_irq_uninit(hdev);
3037 err_misc_irq_init:
3038 	hclgevf_state_uninit(hdev);
3039 	hclgevf_uninit_msi(hdev);
3040 err_cmd_init:
3041 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3042 err_cmd_queue_init:
3043 	hclgevf_pci_uninit(hdev);
3044 	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
3045 	return ret;
3046 }
3047 
3048 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
3049 {
3050 	struct hclge_vf_to_pf_msg send_msg;
3051 
3052 	hclgevf_state_uninit(hdev);
3053 	hclgevf_uninit_rxd_adv_layout(hdev);
3054 
3055 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
3056 	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
3057 
3058 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
3059 		hclgevf_misc_irq_uninit(hdev);
3060 		hclgevf_uninit_msi(hdev);
3061 	}
3062 
3063 	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
3064 	hclgevf_devlink_uninit(hdev);
3065 	hclgevf_pci_uninit(hdev);
3066 	hclgevf_uninit_mac_list(hdev);
3067 }
3068 
3069 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
3070 {
3071 	struct pci_dev *pdev = ae_dev->pdev;
3072 	int ret;
3073 
3074 	ret = hclgevf_alloc_hdev(ae_dev);
3075 	if (ret) {
3076 		dev_err(&pdev->dev, "hclge device allocation failed\n");
3077 		return ret;
3078 	}
3079 
3080 	ret = hclgevf_init_hdev(ae_dev->priv);
3081 	if (ret) {
3082 		dev_err(&pdev->dev, "hclge device initialization failed\n");
3083 		return ret;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
3089 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
3090 {
3091 	struct hclgevf_dev *hdev = ae_dev->priv;
3092 
3093 	hclgevf_uninit_hdev(hdev);
3094 	ae_dev->priv = NULL;
3095 }
3096 
3097 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
3098 {
3099 	return min(hdev->rss_size_max, hdev->num_tqps);
3100 }
3101 
3102 /**
3103  * hclgevf_get_channels - Get the current channels enabled and max supported.
3104  * @handle: hardware information for network interface
3105  * @ch: ethtool channels structure
3106  *
3107  * We don't support separate tx and rx queues as channels. The other count
3108  * represents how many queues are being used for control. max_combined counts
3109  * how many queue pairs we can support. They may not be mapped 1 to 1 with
3110  * q_vectors since we support a lot more queue pairs than q_vectors.
3111  **/
3112 static void hclgevf_get_channels(struct hnae3_handle *handle,
3113 				 struct ethtool_channels *ch)
3114 {
3115 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3116 
3117 	ch->max_combined = hclgevf_get_max_channels(hdev);
3118 	ch->other_count = 0;
3119 	ch->max_other = 0;
3120 	ch->combined_count = handle->kinfo.rss_size;
3121 }
3122 
3123 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
3124 					  u16 *alloc_tqps, u16 *max_rss_size)
3125 {
3126 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3127 
3128 	*alloc_tqps = hdev->num_tqps;
3129 	*max_rss_size = hdev->rss_size_max;
3130 }
3131 
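/* Derive the new rss_size from the requested queue count: honor the
 * user's request when it does not exceed max_rss_size (rss_size_max
 * capped by num_tqps / num_tc), otherwise fall back to max_rss_size.
 */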
3132 static void hclgevf_update_rss_size(struct hnae3_handle *handle,
3133 				    u32 new_tqps_num)
3134 {
3135 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3136 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3137 	u16 max_rss_size;
3138 
3139 	kinfo->req_rss_size = new_tqps_num;
3140 
3141 	max_rss_size = min_t(u16, hdev->rss_size_max,
3142 			     hdev->num_tqps / kinfo->tc_info.num_tc);
3143 
3144 	/* Use the user's configuration when it is not larger than
3145 	 * max_rss_size, otherwise, use the maximum specification value.
3146 	 */
3147 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
3148 	    kinfo->req_rss_size <= max_rss_size)
3149 		kinfo->rss_size = kinfo->req_rss_size;
3150 	else if (kinfo->rss_size > max_rss_size ||
3151 		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
3152 		kinfo->rss_size = max_rss_size;
3153 
3154 	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
3155 }
3156 
3157 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
3158 				bool rxfh_configured)
3159 {
3160 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3161 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3162 	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
3163 	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
3164 	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
3165 	u16 cur_rss_size = kinfo->rss_size;
3166 	u16 cur_tqps = kinfo->num_tqps;
3167 	u32 *rss_indir;
3168 	unsigned int i;
3169 	int ret;
3170 
3171 	hclgevf_update_rss_size(handle, new_tqps_num);
3172 
3173 	hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
3174 				   tc_offset, tc_valid, tc_size);
3175 	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
3176 					 tc_valid, tc_size);
3177 	if (ret)
3178 		return ret;
3179 
3180 	/* RSS indirection table has been configured by user */
3181 	if (rxfh_configured)
3182 		goto out;
3183 
3184 	/* Reinitialize the RSS indirection table according to the new RSS size */
3185 	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
3186 			    sizeof(u32), GFP_KERNEL);
3187 	if (!rss_indir)
3188 		return -ENOMEM;
3189 
3190 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
3191 		rss_indir[i] = i % kinfo->rss_size;
3192 
3193 	hdev->rss_cfg.rss_size = kinfo->rss_size;
3194 
3195 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
3196 	if (ret)
3197 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
3198 			ret);
3199 
3200 	kfree(rss_indir);
3201 
3202 out:
3203 	if (!ret)
3204 		dev_info(&hdev->pdev->dev,
3205 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
3206 			 cur_rss_size, kinfo->rss_size,
3207 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
3208 
3209 	return ret;
3210 }
3211 
3212 static int hclgevf_get_status(struct hnae3_handle *handle)
3213 {
3214 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3215 
3216 	return hdev->hw.mac.link;
3217 }
3218 
3219 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
3220 					    u8 *auto_neg, u32 *speed,
3221 					    u8 *duplex, u32 *lane_num)
3222 {
3223 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3224 
3225 	if (speed)
3226 		*speed = hdev->hw.mac.speed;
3227 	if (duplex)
3228 		*duplex = hdev->hw.mac.duplex;
3229 	if (auto_neg)
3230 		*auto_neg = AUTONEG_DISABLE;
3231 }
3232 
3233 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
3234 				 u8 duplex)
3235 {
3236 	hdev->hw.mac.speed = speed;
3237 	hdev->hw.mac.duplex = duplex;
3238 }
3239 
3240 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
3241 {
3242 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3243 	bool gro_en_old = hdev->gro_en;
3244 	int ret;
3245 
3246 	hdev->gro_en = enable;
3247 	ret = hclgevf_config_gro(hdev);
3248 	if (ret)
3249 		hdev->gro_en = gro_en_old;
3250 
3251 	return ret;
3252 }
3253 
3254 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
3255 				   u8 *module_type)
3256 {
3257 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3258 
3259 	if (media_type)
3260 		*media_type = hdev->hw.mac.media_type;
3261 
3262 	if (module_type)
3263 		*module_type = hdev->hw.mac.module_type;
3264 }
3265 
3266 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
3267 {
3268 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3269 
3270 	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
3271 }
3272 
3273 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
3274 {
3275 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3276 
3277 	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3278 }
3279 
3280 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
3281 {
3282 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3283 
3284 	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
3285 }
3286 
hclgevf_ae_dev_reset_cnt(struct hnae3_handle * handle)3287 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
3288 {
3289 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3290 
3291 	return hdev->rst_stats.hw_rst_done_cnt;
3292 }
3293 
hclgevf_get_link_mode(struct hnae3_handle * handle,unsigned long * supported,unsigned long * advertising)3294 static void hclgevf_get_link_mode(struct hnae3_handle *handle,
3295 				  unsigned long *supported,
3296 				  unsigned long *advertising)
3297 {
3298 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
3299 
3300 	*supported = hdev->hw.mac.supported;
3301 	*advertising = hdev->hw.mac.advertising;
3302 }
3303 
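/* hclgevf_update_port_base_vlan_info - apply a port based VLAN change:
 * pause the client under rtnl_lock, forward the new config to the PF
 * via mailbox, update the cached state on success, then bring the
 * client back up
 */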
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
				struct hclge_mbx_port_base_vlan *port_base_vlan)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hclge_vf_to_pf_msg send_msg;
	int ret;

	rtnl_lock();

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
		dev_warn(&hdev->pdev->dev,
			 "device is resetting, skip updating port based vlan info\n");
		rtnl_unlock();
		return;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret) {
		rtnl_unlock();
		return;
	}

	/* send msg to PF and wait for it to update the port based vlan info */
	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
	if (!ret) {
		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
			nic->port_base_vlan_state = state;
		else
			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}

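/* operations exposed to the hnae3 framework; the client/ENET layer
 * dispatches into the VF driver through this table
 */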
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.reset_prepare = hclgevf_reset_prepare_general,
	.reset_done = hclgevf_reset_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_vlan_filter = hclgevf_enable_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.set_channels = hclgevf_set_channels,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
	.get_link_mode = hclgevf_get_link_mode,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
	.get_cmdq_stat = hclgevf_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

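/* module entry: create the unbound service workqueue shared by all
 * hclgevf devices, then register the algo with the hnae3 framework
 */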
static int __init hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
	if (!hclgevf_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void __exit hclgevf_exit(void)
{
	hnae3_acquire_unload_lock();
	hnae3_unregister_ae_algo(&ae_algovf);
	destroy_workqueue(hclgevf_wq);
	hnae3_release_unload_lock();
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);