1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 #include <net/rtnetlink.h>
6 #include "hclgevf_cmd.h"
7 #include "hclgevf_main.h"
8 #include "hclge_mbx.h"
9 #include "hnae3.h"
10 
11 #define HCLGEVF_NAME	"hclgevf"
12 
13 static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
14 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
15 static struct hnae3_ae_algo ae_algovf;
16 
17 static const struct pci_device_id ae_algovf_pci_tbl[] = {
18 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
19 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
20 	/* required last entry */
21 	{0, }
22 };
23 
24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
25 
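/* Resolve the hclgevf_dev that owns an hnae3 handle. The RoCE client gets
 * its own handle embedded in the device, so pick the containing structure
 * based on the client type; everything else resolves through the nic handle.
 */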
26 static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
27 	struct hnae3_handle *handle)
28 {
29 	if (!handle->client)
30 		return container_of(handle, struct hclgevf_dev, nic);
31 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
32 		return container_of(handle, struct hclgevf_dev, roce);
33 	else
34 		return container_of(handle, struct hclgevf_dev, nic);
35 }
36 
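/* Query the per-queue RX and TX packet counters from firmware, one command
 * per direction per TQP, and accumulate them into the shadow tqp_stats.
 */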
37 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
38 {
39 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
40 	struct hnae3_queue *queue;
41 	struct hclgevf_desc desc;
42 	struct hclgevf_tqp *tqp;
43 	int status;
44 	int i;
45 
46 	for (i = 0; i < hdev->num_tqps; i++) {
47 		queue = handle->kinfo.tqp[i];
48 		tqp = container_of(queue, struct hclgevf_tqp, q);
49 		hclgevf_cmd_setup_basic_desc(&desc,
50 					     HCLGEVF_OPC_QUERY_RX_STATUS,
51 					     true);
52 
53 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
54 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
55 		if (status) {
56 			dev_err(&hdev->pdev->dev,
57 				"Query tqp stat fail, status = %d, queue = %d\n",
58 				status,	i);
59 			return status;
60 		}
61 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
62 			le32_to_cpu(desc.data[1]);
63 
64 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
65 					     true);
66 
67 		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
68 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
69 		if (status) {
70 			dev_err(&hdev->pdev->dev,
71 				"Query tqp stat fail, status = %d, queue = %d\n",
72 				status, i);
73 			return status;
74 		}
75 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
76 			le32_to_cpu(desc.data[1]);
77 	}
78 
79 	return 0;
80 }
81 
82 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
83 {
84 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
85 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
86 	struct hclgevf_tqp *tqp;
87 	u64 *buff = data;
88 	int i;
89 
90 	for (i = 0; i < hdev->num_tqps; i++) {
91 		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
92 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
93 	}
94 	for (i = 0; i < kinfo->num_tqps; i++) {
95 		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
96 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
97 	}
98 
99 	return buff;
100 }
101 
102 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
103 {
104 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
105 
106 	return hdev->num_tqps * 2;
107 }
108 
109 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
110 {
111 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
112 	u8 *buff = data;
113 	int i = 0;
114 
115 	for (i = 0; i < hdev->num_tqps; i++) {
116 		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
117 			struct hclgevf_tqp, q);
118 		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
119 			 tqp->index);
120 		buff += ETH_GSTRING_LEN;
121 	}
122 
123 	for (i = 0; i < hdev->num_tqps; i++) {
124 		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
125 			struct hclgevf_tqp, q);
126 		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
127 			 tqp->index);
128 		buff += ETH_GSTRING_LEN;
129 	}
130 
131 	return buff;
132 }
133 
134 static void hclgevf_update_stats(struct hnae3_handle *handle,
135 				 struct net_device_stats *net_stats)
136 {
137 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
138 	int status;
139 
140 	status = hclgevf_tqps_update_stats(handle);
141 	if (status)
142 		dev_err(&hdev->pdev->dev,
143 			"VF update of TQPS stats fail, status = %d.\n",
144 			status);
145 }
146 
147 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
148 {
149 	if (strset == ETH_SS_TEST)
150 		return -EOPNOTSUPP;
151 	else if (strset == ETH_SS_STATS)
152 		return hclgevf_tqps_get_sset_count(handle, strset);
153 
154 	return 0;
155 }
156 
157 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
158 				u8 *data)
159 {
160 	u8 *p = (char *)data;
161 
162 	if (strset == ETH_SS_STATS)
163 		p = hclgevf_tqps_get_strings(handle, p);
164 }
165 
166 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
167 {
168 	hclgevf_tqps_get_stats(handle, data);
169 }
170 
171 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
172 {
173 	u8 resp_msg;
174 	int status;
175 
176 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
177 				      true, &resp_msg, sizeof(u8));
178 	if (status) {
179 		dev_err(&hdev->pdev->dev,
180 			"VF request to get TC info from PF failed %d",
181 			status);
182 		return status;
183 	}
184 
185 	hdev->hw_tc_map = resp_msg;
186 
187 	return 0;
188 }
189 
190 static int hclge_get_queue_info(struct hclgevf_dev *hdev)
191 {
192 #define HCLGEVF_TQPS_RSS_INFO_LEN	8
193 	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
194 	int status;
195 
196 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
197 				      true, resp_msg,
198 				      HCLGEVF_TQPS_RSS_INFO_LEN);
199 	if (status) {
200 		dev_err(&hdev->pdev->dev,
201 			"VF request to get tqp info from PF failed %d",
202 			status);
203 		return status;
204 	}
205 
206 	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
207 	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
208 	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
209 	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
210 
211 	return 0;
212 }
213 
214 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
215 {
216 	struct hclgevf_tqp *tqp;
217 	int i;
218 
219 	/* if this is an ongoing reset then we need to re-allocate the TQPs
220 	 * since we cannot assume the PF would hand back the same number of TQPs
221 	 */
222 	if (hclgevf_dev_ongoing_reset(hdev))
223 		devm_kfree(&hdev->pdev->dev, hdev->htqp);
224 
225 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
226 				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
227 	if (!hdev->htqp)
228 		return -ENOMEM;
229 
230 	tqp = hdev->htqp;
231 
232 	for (i = 0; i < hdev->num_tqps; i++) {
233 		tqp->dev = &hdev->pdev->dev;
234 		tqp->index = i;
235 
236 		tqp->q.ae_algo = &ae_algovf;
237 		tqp->q.buf_size = hdev->rx_buf_len;
238 		tqp->q.desc_num = hdev->num_desc;
239 		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
240 			i * HCLGEVF_TQP_REG_SIZE;
241 
242 		tqp++;
243 	}
244 
245 	return 0;
246 }
247 
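/* Fill in the knic private info for the nic handle: count the TCs enabled
 * in hw_tc_map, size rss_size so that every TC gets an equal share of the
 * TQPs, and allocate the queue pointer array backing kinfo->tqp.
 */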
248 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
249 {
250 	struct hnae3_handle *nic = &hdev->nic;
251 	struct hnae3_knic_private_info *kinfo;
252 	u16 new_tqps = hdev->num_tqps;
253 	int i;
254 
255 	kinfo = &nic->kinfo;
256 	kinfo->num_tc = 0;
257 	kinfo->num_desc = hdev->num_desc;
258 	kinfo->rx_buf_len = hdev->rx_buf_len;
259 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
260 		if (hdev->hw_tc_map & BIT(i))
261 			kinfo->num_tc++;
262 
263 	kinfo->rss_size
264 		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
265 	new_tqps = kinfo->rss_size * kinfo->num_tc;
266 	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
267 
268 	/* if this is an ongoing reset then we need to re-allocate the hnae
269 	 * queues as well since the number of TQPs from PF might have changed.
270 	 */
271 	if (hclgevf_dev_ongoing_reset(hdev))
272 		devm_kfree(&hdev->pdev->dev, kinfo->tqp);
273 
274 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
275 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
276 	if (!kinfo->tqp)
277 		return -ENOMEM;
278 
279 	for (i = 0; i < kinfo->num_tqps; i++) {
280 		hdev->htqp[i].q.handle = &hdev->nic;
281 		hdev->htqp[i].q.tqp_index = i;
282 		kinfo->tqp[i] = &hdev->htqp[i].q;
283 	}
284 
285 	return 0;
286 }
287 
288 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
289 {
290 	int status;
291 	u8 resp_msg;
292 
293 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
294 				      0, false, &resp_msg, sizeof(u8));
295 	if (status)
296 		dev_err(&hdev->pdev->dev,
297 			"VF failed to fetch link status(%d) from PF", status);
298 }
299 
300 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
301 {
302 	struct hnae3_handle *handle = &hdev->nic;
303 	struct hnae3_client *client;
304 
305 	client = handle->client;
306 
307 	link_state =
308 		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
309 
310 	if (link_state != hdev->hw.mac.link) {
311 		client->ops->link_status_change(handle, !!link_state);
312 		hdev->hw.mac.link = link_state;
313 	}
314 }
315 
316 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
317 {
318 	struct hnae3_handle *nic = &hdev->nic;
319 	int ret;
320 
321 	nic->ae_algo = &ae_algovf;
322 	nic->pdev = hdev->pdev;
323 	nic->numa_node_mask = hdev->numa_node_mask;
324 	nic->flags |= HNAE3_SUPPORT_VF;
325 
326 	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
327 		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
328 			hdev->ae_dev->dev_type);
329 		return -EINVAL;
330 	}
331 
332 	ret = hclgevf_knic_setup(hdev);
333 	if (ret)
334 		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
335 			ret);
336 	return ret;
337 }
338 
339 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
340 {
341 	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
342 		dev_warn(&hdev->pdev->dev,
343 			 "vector(vector_id %d) has been freed.\n", vector_id);
344 		return;
345 	}
346 
347 	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
348 	hdev->num_msi_left += 1;
349 	hdev->num_msi_used -= 1;
350 }
351 
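/* Allocate up to vector_num unused MSI-X vectors for the client, skipping
 * the misc vector (vector 0). Returns the number of vectors actually
 * allocated, which may be less than requested.
 */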
352 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
353 			      struct hnae3_vector_info *vector_info)
354 {
355 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
356 	struct hnae3_vector_info *vector = vector_info;
357 	int alloc = 0;
358 	int i, j;
359 
360 	vector_num = min(hdev->num_msi_left, vector_num);
361 
362 	for (j = 0; j < vector_num; j++) {
363 		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
364 			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
365 				vector->vector = pci_irq_vector(hdev->pdev, i);
366 				vector->io_addr = hdev->hw.io_base +
367 					HCLGEVF_VECTOR_REG_BASE +
368 					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
369 				hdev->vector_status[i] = 0;
370 				hdev->vector_irq[i] = vector->vector;
371 
372 				vector++;
373 				alloc++;
374 
375 				break;
376 			}
377 		}
378 	}
379 	hdev->num_msi_left -= alloc;
380 	hdev->num_msi_used += alloc;
381 
382 	return alloc;
383 }
384 
385 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
386 {
387 	int i;
388 
389 	for (i = 0; i < hdev->num_msi; i++)
390 		if (vector == hdev->vector_irq[i])
391 			return i;
392 
393 	return -EINVAL;
394 }
395 
396 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
397 {
398 	return HCLGEVF_RSS_KEY_SIZE;
399 }
400 
401 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
402 {
403 	return HCLGEVF_RSS_IND_TBL_SIZE;
404 }
405 
406 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
407 {
408 	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
409 	struct hclgevf_rss_indirection_table_cmd *req;
410 	struct hclgevf_desc desc;
411 	int status;
412 	int i, j;
413 
414 	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
415 
416 	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
417 		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
418 					     false);
419 		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
420 		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
421 		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
422 			req->rss_result[j] =
423 				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
424 
425 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
426 		if (status) {
427 			dev_err(&hdev->pdev->dev,
428 				"VF failed(=%d) to set RSS indirection table\n",
429 				status);
430 			return status;
431 		}
432 	}
433 
434 	return 0;
435 }
436 
437 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
438 {
439 	struct hclgevf_rss_tc_mode_cmd *req;
440 	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
441 	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
442 	u16 tc_size[HCLGEVF_MAX_TC_NUM];
443 	struct hclgevf_desc desc;
444 	u16 roundup_size;
445 	int status;
446 	int i;
447 
448 	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
449 
450 	roundup_size = roundup_pow_of_two(rss_size);
451 	roundup_size = ilog2(roundup_size);
452 
453 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
454 		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
455 		tc_size[i] = roundup_size;
456 		tc_offset[i] = rss_size * i;
457 	}
458 
459 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
460 	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
461 		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
462 			      (tc_valid[i] & 0x1));
463 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
464 				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
465 		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
466 				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
467 	}
468 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
469 	if (status)
470 		dev_err(&hdev->pdev->dev,
471 			"VF failed(=%d) to set rss tc mode\n", status);
472 
473 	return status;
474 }
475 
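/* Read the RSS hash algorithm and, when a key buffer is supplied, the hash
 * key from hardware. The key is fetched in three chunks of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes via the generic RSS config command.
 */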
476 static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
477 				  u8 *key)
478 {
479 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
480 	struct hclgevf_rss_config_cmd *req;
481 	int lkup_times = key ? 3 : 1;
482 	struct hclgevf_desc desc;
483 	int key_offset;
484 	int key_size;
485 	int status;
486 
487 	req = (struct hclgevf_rss_config_cmd *)desc.data;
488 	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
489 
490 	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
491 		hclgevf_cmd_setup_basic_desc(&desc,
492 					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
493 					     true);
494 		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
495 
496 		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
497 		if (status) {
498 			dev_err(&hdev->pdev->dev,
499 				"failed to get hardware RSS cfg, status = %d\n",
500 				status);
501 			return status;
502 		}
503 
504 		if (key_offset == 2)
505 			key_size =
506 			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
507 		else
508 			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
509 
510 		if (key)
511 			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
512 			       req->hash_key,
513 			       key_size);
514 	}
515 
516 	if (hash) {
517 		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
518 			*hash = ETH_RSS_HASH_TOP;
519 		else
520 			*hash = ETH_RSS_HASH_UNKNOWN;
521 	}
522 
523 	return 0;
524 }
525 
526 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
527 			   u8 *hfunc)
528 {
529 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
530 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
531 	int i;
532 
533 	if (indir)
534 		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
535 			indir[i] = rss_cfg->rss_indirection_tbl[i];
536 
537 	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
538 }
539 
540 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
541 			   const  u8 *key, const  u8 hfunc)
542 {
543 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
544 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
545 	int i;
546 
547 	/* update the shadow RSS table with user specified qids */
548 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
549 		rss_cfg->rss_indirection_tbl[i] = indir[i];
550 
551 	/* update the hardware */
552 	return hclgevf_set_rss_indir_table(hdev);
553 }
554 
555 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
556 {
557 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
558 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
559 
560 	return rss_cfg->rss_size;
561 }
562 
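/* Build VF-to-PF mailbox messages that map (or unmap) every ring in
 * ring_chain to the given vector. The chain is split across several
 * messages whenever it no longer fits into a single mailbox payload.
 */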
563 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
564 				       int vector_id,
565 				       struct hnae3_ring_chain_node *ring_chain)
566 {
567 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
568 	struct hnae3_ring_chain_node *node;
569 	struct hclge_mbx_vf_to_pf_cmd *req;
570 	struct hclgevf_desc desc;
571 	int i = 0;
572 	int status;
573 	u8 type;
574 
575 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
576 
577 	for (node = ring_chain; node; node = node->next) {
578 		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
579 					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
580 
581 		if (i == 0) {
582 			hclgevf_cmd_setup_basic_desc(&desc,
583 						     HCLGEVF_OPC_MBX_VF_TO_PF,
584 						     false);
585 			type = en ?
586 				HCLGE_MBX_MAP_RING_TO_VECTOR :
587 				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
588 			req->msg[0] = type;
589 			req->msg[1] = vector_id;
590 		}
591 
592 		req->msg[idx_offset] =
593 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
594 		req->msg[idx_offset + 1] = node->tqp_index;
595 		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
596 							   HNAE3_RING_GL_IDX_M,
597 							   HNAE3_RING_GL_IDX_S);
598 
599 		i++;
600 		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
601 		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
602 		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
603 		    !node->next) {
604 			req->msg[2] = i;
605 
606 			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
607 			if (status) {
608 				dev_err(&hdev->pdev->dev,
609 					"Map TQP fail, status is %d.\n",
610 					status);
611 				return status;
612 			}
613 			i = 0;
614 			hclgevf_cmd_setup_basic_desc(&desc,
615 						     HCLGEVF_OPC_MBX_VF_TO_PF,
616 						     false);
617 			req->msg[0] = type;
618 			req->msg[1] = vector_id;
619 		}
620 	}
621 
622 	return 0;
623 }
624 
625 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
626 				      struct hnae3_ring_chain_node *ring_chain)
627 {
628 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
629 	int vector_id;
630 
631 	vector_id = hclgevf_get_vector_index(hdev, vector);
632 	if (vector_id < 0) {
633 		dev_err(&handle->pdev->dev,
634 			"Get vector index fail. ret =%d\n", vector_id);
635 		return vector_id;
636 	}
637 
638 	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
639 }
640 
641 static int hclgevf_unmap_ring_from_vector(
642 				struct hnae3_handle *handle,
643 				int vector,
644 				struct hnae3_ring_chain_node *ring_chain)
645 {
646 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
647 	int ret, vector_id;
648 
649 	vector_id = hclgevf_get_vector_index(hdev, vector);
650 	if (vector_id < 0) {
651 		dev_err(&handle->pdev->dev,
652 			"Get vector index fail. ret =%d\n", vector_id);
653 		return vector_id;
654 	}
655 
656 	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
657 	if (ret)
658 		dev_err(&handle->pdev->dev,
659 			"Unmap ring from vector fail. vector=%d, ret =%d\n",
660 			vector_id,
661 			ret);
662 
663 	return ret;
664 }
665 
666 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
667 {
668 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
669 	int vector_id;
670 
671 	vector_id = hclgevf_get_vector_index(hdev, vector);
672 	if (vector_id < 0) {
673 		dev_err(&handle->pdev->dev,
674 			"hclgevf_put_vector get vector index fail. ret =%d\n",
675 			vector_id);
676 		return vector_id;
677 	}
678 
679 	hclgevf_free_vector(hdev, vector_id);
680 
681 	return 0;
682 }
683 
684 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
685 					bool en_uc_pmc, bool en_mc_pmc)
686 {
687 	struct hclge_mbx_vf_to_pf_cmd *req;
688 	struct hclgevf_desc desc;
689 	int status;
690 
691 	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
692 
693 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
694 	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
695 	req->msg[1] = en_uc_pmc ? 1 : 0;
696 	req->msg[2] = en_mc_pmc ? 1 : 0;
697 
698 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
699 	if (status)
700 		dev_err(&hdev->pdev->dev,
701 			"Set promisc mode fail, status is %d.\n", status);
702 
703 	return status;
704 }
705 
706 static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
707 				     bool en_uc_pmc, bool en_mc_pmc)
708 {
709 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
710 
711 	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
712 }
713 
714 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
715 			      int stream_id, bool enable)
716 {
717 	struct hclgevf_cfg_com_tqp_queue_cmd *req;
718 	struct hclgevf_desc desc;
719 	int status;
720 
721 	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
722 
723 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
724 				     false);
725 	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
726 	req->stream_id = cpu_to_le16(stream_id);
727 	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
728 
729 	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
730 	if (status)
731 		dev_err(&hdev->pdev->dev,
732 			"TQP enable fail, status =%d.\n", status);
733 
734 	return status;
735 }
736 
737 static int hclgevf_get_queue_id(struct hnae3_queue *queue)
738 {
739 	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
740 
741 	return tqp->index;
742 }
743 
744 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
745 {
746 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
747 	struct hnae3_queue *queue;
748 	struct hclgevf_tqp *tqp;
749 	int i;
750 
751 	for (i = 0; i < hdev->num_tqps; i++) {
752 		queue = handle->kinfo.tqp[i];
753 		tqp = container_of(queue, struct hclgevf_tqp, q);
754 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
755 	}
756 }
757 
758 static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
759 {
760 	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
761 	int ret;
762 
763 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
764 				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
765 				   NULL, 0, true, &resp_msg, sizeof(u8));
766 
767 	if (ret) {
768 		dev_err(&hdev->pdev->dev,
769 			"Read mta type fail, ret=%d.\n", ret);
770 		return ret;
771 	}
772 
773 	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
774 		dev_err(&hdev->pdev->dev,
775 			"Read mta type invalid, resp=%d.\n", resp_msg);
776 		return -EINVAL;
777 	}
778 
779 	hdev->mta_mac_sel_type = resp_msg;
780 
781 	return 0;
782 }
783 
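/* Hash a MAC address into its MTA table index: take the top 16 bits of the
 * address and shift them according to the MTA type selected by the PF,
 * keeping a 12-bit index.
 */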
784 static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
785 					     const u8 *addr)
786 {
787 	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
788 	u16 high_val = addr[1] | (addr[0] << 8);
789 
790 	return (high_val >> rsh) & 0xfff;
791 }
792 
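/* Push the MTA status bitmap to the PF in mailbox-sized chunks. Byte 0 of
 * each message carries the chunk index (bit 7 marks the final chunk) and
 * the remaining bytes carry that chunk of the table bitmap.
 */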
793 static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
794 					unsigned long *status)
795 {
796 #define HCLGEVF_MTA_STATUS_MSG_SIZE 13
797 #define HCLGEVF_MTA_STATUS_MSG_BITS \
798 			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
799 #define HCLGEVF_MTA_STATUS_MSG_END_BITS \
800 			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
801 	u16 tbl_cnt;
802 	u16 tbl_idx;
803 	u8 msg_cnt;
804 	u8 msg_idx;
805 	int ret;
806 
807 	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
808 			       HCLGEVF_MTA_STATUS_MSG_BITS);
809 	tbl_idx = 0;
810 	msg_idx = 0;
811 	while (msg_cnt--) {
812 		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
813 		u8 *p = &msg[1];
814 		u8 msg_ofs;
815 		u8 msg_bit;
816 
817 		memset(msg, 0, sizeof(msg));
818 
819 		/* set index field */
820 		msg[0] = 0x7F & msg_idx;
821 
822 		/* set end flag field */
823 		if (msg_cnt == 0) {
824 			msg[0] |= 0x80;
825 			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
826 		} else {
827 			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
828 		}
829 
830 		/* set status field */
831 		msg_ofs = 0;
832 		msg_bit = 0;
833 		while (tbl_cnt--) {
834 			if (test_bit(tbl_idx, status))
835 				p[msg_ofs] |= BIT(msg_bit);
836 
837 			tbl_idx++;
838 
839 			msg_bit++;
840 			if (msg_bit == BITS_PER_BYTE) {
841 				msg_bit = 0;
842 				msg_ofs++;
843 			}
844 		}
845 
846 		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
847 					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
848 					   msg, sizeof(msg), false, NULL, 0);
849 		if (ret)
850 			break;
851 
852 		msg_idx++;
853 	}
854 
855 	return ret;
856 }
857 
858 static int hclgevf_update_mta_status(struct hnae3_handle *handle)
859 {
860 	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
861 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
862 	struct net_device *netdev = hdev->nic.kinfo.netdev;
863 	struct netdev_hw_addr *ha;
864 	u16 tbl_idx;
865 
866 	/* clear status */
867 	memset(mta_status, 0, sizeof(mta_status));
868 
869 	/* update status from mc addr list */
870 	netdev_for_each_mc_addr(ha, netdev) {
871 		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
872 		set_bit(tbl_idx, mta_status);
873 	}
874 
875 	return hclgevf_do_update_mta_status(hdev, mta_status);
876 }
877 
878 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
879 {
880 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
881 
882 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
883 }
884 
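/* Ask the PF to change the unicast MAC address. The message carries both
 * the new and the old address, and the local shadow copy is only updated
 * once the PF has accepted the request.
 */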
885 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
886 				bool is_first)
887 {
888 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
889 	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
890 	u8 *new_mac_addr = (u8 *)p;
891 	u8 msg_data[ETH_ALEN * 2];
892 	u16 subcode;
893 	int status;
894 
895 	ether_addr_copy(msg_data, new_mac_addr);
896 	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
897 
898 	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
899 			HCLGE_MBX_MAC_VLAN_UC_MODIFY;
900 
901 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
902 				      subcode, msg_data, ETH_ALEN * 2,
903 				      true, NULL, 0);
904 	if (!status)
905 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
906 
907 	return status;
908 }
909 
910 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
911 			       const unsigned char *addr)
912 {
913 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
914 
915 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
916 				    HCLGE_MBX_MAC_VLAN_UC_ADD,
917 				    addr, ETH_ALEN, false, NULL, 0);
918 }
919 
920 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
921 			      const unsigned char *addr)
922 {
923 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
924 
925 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
926 				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
927 				    addr, ETH_ALEN, false, NULL, 0);
928 }
929 
930 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
931 			       const unsigned char *addr)
932 {
933 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
934 
935 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
936 				    HCLGE_MBX_MAC_VLAN_MC_ADD,
937 				    addr, ETH_ALEN, false, NULL, 0);
938 }
939 
940 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
941 			      const unsigned char *addr)
942 {
943 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
944 
945 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
946 				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
947 				    addr, ETH_ALEN, false, NULL, 0);
948 }
949 
950 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
951 				   __be16 proto, u16 vlan_id,
952 				   bool is_kill)
953 {
954 #define HCLGEVF_VLAN_MBX_MSG_LEN 5
955 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
956 	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
957 
958 	if (vlan_id > 4095)
959 		return -EINVAL;
960 
961 	if (proto != htons(ETH_P_8021Q))
962 		return -EPROTONOSUPPORT;
963 
964 	msg_data[0] = is_kill;
965 	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
966 	memcpy(&msg_data[3], &proto, sizeof(proto));
967 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
968 				    HCLGE_MBX_VLAN_FILTER, msg_data,
969 				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
970 }
971 
972 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
973 {
974 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
975 	u8 msg_data;
976 
977 	msg_data = enable ? 1 : 0;
978 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
979 				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
980 				    1, false, NULL, 0);
981 }
982 
983 static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
984 {
985 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
986 	u8 msg_data[2];
987 	int ret;
988 
989 	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
990 
991 	/* disable vf queue before sending queue reset msg to PF */
992 	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
993 	if (ret)
994 		return;
995 
996 	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
997 			     2, true, NULL, 0);
998 }
999 
1000 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
1001 				 enum hnae3_reset_notify_type type)
1002 {
1003 	struct hnae3_client *client = hdev->nic_client;
1004 	struct hnae3_handle *handle = &hdev->nic;
1005 
1006 	if (!client->ops->reset_notify)
1007 		return -EOPNOTSUPP;
1008 
1009 	return client->ops->reset_notify(handle, type);
1010 }
1011 
1012 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1013 {
1014 #define HCLGEVF_RESET_WAIT_MS	500
1015 #define HCLGEVF_RESET_WAIT_CNT	20
1016 	u32 val, cnt = 0;
1017 
1018 	/* wait to check the hardware reset completion status */
1019 	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1020 	while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
1021 	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
1022 		msleep(HCLGEVF_RESET_WAIT_MS);
1023 		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1024 		cnt++;
1025 	}
1026 
1027 	/* hardware completion status should be available by this time */
1028 	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
1029 		dev_warn(&hdev->pdev->dev,
1030 			 "couldn't get reset done status from h/w, timeout!\n");
1031 		return -EBUSY;
1032 	}
1033 
1034 	/* we will wait a bit more to let the reset of the stack complete. This
1035 	 * might happen in case the reset assertion was made by the PF. Yes, this
1036 	 * also means we might end up waiting a bit more even for a VF reset.
1037 	 */
1038 	msleep(5000);
1039 
1040 	return 0;
1041 }
1042 
1043 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1044 {
1045 	int ret;
1046 
1047 	/* uninitialize the nic client */
1048 	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1049 
1050 	/* re-initialize the hclge device */
1051 	ret = hclgevf_init_hdev(hdev);
1052 	if (ret) {
1053 		dev_err(&hdev->pdev->dev,
1054 			"hclge device re-init failed, VF is disabled!\n");
1055 		return ret;
1056 	}
1057 
1058 	/* bring up the nic client again */
1059 	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1060 
1061 	return 0;
1062 }
1063 
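/* Core VF reset flow: bring the nic client down, wait for the hardware to
 * report reset completion, re-initialize the hclgevf device and nic client,
 * and finally bring the client back up.
 */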
1064 static int hclgevf_reset(struct hclgevf_dev *hdev)
1065 {
1066 	int ret;
1067 
1068 	rtnl_lock();
1069 
1070 	/* bring down the nic to stop any ongoing TX/RX */
1071 	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1072 
1073 	rtnl_unlock();
1074 
1075 	/* check if VF could successfully fetch the hardware reset completion
1076 	 * status from the hardware
1077 	 */
1078 	ret = hclgevf_reset_wait(hdev);
1079 	if (ret) {
1080 		/* can't do much in this situation, will disable VF */
1081 		dev_err(&hdev->pdev->dev,
1082 			"VF failed(=%d) to fetch H/W reset completion status\n",
1083 			ret);
1084 
1085 		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
1086 		rtnl_lock();
1087 		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1088 
1089 		rtnl_unlock();
1090 		return ret;
1091 	}
1092 
1093 	rtnl_lock();
1094 
1095 	/* now, re-initialize the nic client and ae device */
1096 	ret = hclgevf_reset_stack(hdev);
1097 	if (ret)
1098 		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1099 
1100 	/* bring up the nic to enable TX/RX again */
1101 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1102 
1103 	rtnl_unlock();
1104 
1105 	return ret;
1106 }
1107 
1108 static int hclgevf_do_reset(struct hclgevf_dev *hdev)
1109 {
1110 	int status;
1111 	u8 respmsg;
1112 
1113 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1114 				      0, false, &respmsg, sizeof(u8));
1115 	if (status)
1116 		dev_err(&hdev->pdev->dev,
1117 			"VF reset request to PF failed(=%d)\n", status);
1118 
1119 	return status;
1120 }
1121 
1122 static void hclgevf_reset_event(struct hnae3_handle *handle)
1123 {
1124 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1125 
1126 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1127 
1128 	handle->reset_level = HNAE3_VF_RESET;
1129 
1130 	/* reset of this VF requested */
1131 	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1132 	hclgevf_reset_task_schedule(hdev);
1133 
1134 	handle->last_reset_time = jiffies;
1135 }
1136 
1137 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1138 {
1139 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1140 
1141 	return hdev->fw_version;
1142 }
1143 
1144 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1145 {
1146 	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1147 
1148 	vector->vector_irq = pci_irq_vector(hdev->pdev,
1149 					    HCLGEVF_MISC_VECTOR_NUM);
1150 	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1151 	/* vector status always valid for Vector 0 */
1152 	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1153 	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1154 
1155 	hdev->num_msi_left -= 1;
1156 	hdev->num_msi_used += 1;
1157 }
1158 
1159 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1160 {
1161 	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
1162 	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
1163 		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1164 		schedule_work(&hdev->rst_service_task);
1165 	}
1166 }
1167 
1168 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1169 {
1170 	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1171 	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1172 		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1173 		schedule_work(&hdev->mbx_service_task);
1174 	}
1175 }
1176 
1177 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
1178 {
1179 	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state)  &&
1180 	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1181 		schedule_work(&hdev->service_task);
1182 }
1183 
1184 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
1185 {
1186 	/* if we have any pending mailbox event then schedule the mbx task */
1187 	if (hdev->mbx_event_pending)
1188 		hclgevf_mbx_task_schedule(hdev);
1189 
1190 	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1191 		hclgevf_reset_task_schedule(hdev);
1192 }
1193 
1194 static void hclgevf_service_timer(struct timer_list *t)
1195 {
1196 	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1197 
1198 	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1199 
1200 	hclgevf_task_schedule(hdev);
1201 }
1202 
1203 static void hclgevf_reset_service_task(struct work_struct *work)
1204 {
1205 	struct hclgevf_dev *hdev =
1206 		container_of(work, struct hclgevf_dev, rst_service_task);
1207 	int ret;
1208 
1209 	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1210 		return;
1211 
1212 	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1213 
1214 	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1215 			       &hdev->reset_state)) {
1216 		/* PF has intimated that it is about to reset the hardware.
1217 		 * We now have to poll & check if hardware has actually completed
1218 		 * the reset sequence. On hardware reset completion, VF needs to
1219 		 * reset the client and ae device.
1220 		 */
1221 		hdev->reset_attempts = 0;
1222 
1223 		ret = hclgevf_reset(hdev);
1224 		if (ret)
1225 			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
1226 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1227 				      &hdev->reset_state)) {
1228 		/* we could be here when either of the below happens:
1229 		 * 1. reset was initiated due to a watchdog timeout caused by
1230 		 *    a. IMP was earlier reset and our TX got choked down,
1231 		 *       which resulted in the watchdog reacting and inducing a
1232 		 *       VF reset. This also means our cmdq would be unreliable.
1233 		 *    b. a problem in TX due to some other lower layer (e.g. the
1234 		 *       link layer not functioning properly etc.)
1235 		 * 2. VF reset might have been initiated due to some config
1236 		 *    change.
1237 		 *
1238 		 * NOTE: There's no clear way to detect the above cases other than
1239 		 * to react to the PF's response to this reset request. PF will ack
1240 		 * the 1b and 2. cases but we will not get any intimation about 1a
1241 		 * from PF as cmdq would be in unreliable state i.e. mailbox
1242 		 * communication between PF and VF would be broken.
1243 		 */
1244 
1245 		/* if we are never getting into the pending state it means either:
1246 		 * 1. PF is not receiving our request which could be due to IMP
1247 		 *    reset
1248 		 * 2. PF is screwed
1249 		 * We cannot do much for 2., but to check we can first try resetting
1250 		 * our PCIe + stack and see if it alleviates the problem.
1251 		 */
1252 		if (hdev->reset_attempts > 3) {
1253 			/* prepare for full reset of stack + pcie interface */
1254 			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
1255 
1256 			/* "defer" schedule the reset task again */
1257 			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1258 		} else {
1259 			hdev->reset_attempts++;
1260 
1261 			/* request PF for resetting this VF via mailbox */
1262 			ret = hclgevf_do_reset(hdev);
1263 			if (ret)
1264 				dev_warn(&hdev->pdev->dev,
1265 					 "VF rst fail, stack will call\n");
1266 		}
1267 	}
1268 
1269 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1270 }
1271 
1272 static void hclgevf_mailbox_service_task(struct work_struct *work)
1273 {
1274 	struct hclgevf_dev *hdev;
1275 
1276 	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
1277 
1278 	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1279 		return;
1280 
1281 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1282 
1283 	hclgevf_mbx_async_handler(hdev);
1284 
1285 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1286 }
1287 
1288 static void hclgevf_service_task(struct work_struct *work)
1289 {
1290 	struct hclgevf_dev *hdev;
1291 
1292 	hdev = container_of(work, struct hclgevf_dev, service_task);
1293 
1294 	/* request the link status from the PF. PF would be able to tell VF
1295 	 * about such updates in the future, so we might remove this later
1296 	 */
1297 	hclgevf_request_link_info(hdev);
1298 
1299 	hclgevf_deferred_task_schedule(hdev);
1300 
1301 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1302 }
1303 
1304 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1305 {
1306 	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1307 }
1308 
1309 static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
1310 {
1311 	u32 cmdq_src_reg;
1312 
1313 	/* fetch the events from their corresponding regs */
1314 	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1315 					HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1316 
1317 	/* check for vector0 mailbox(=CMDQ RX) event source */
1318 	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1319 		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1320 		*clearval = cmdq_src_reg;
1321 		return true;
1322 	}
1323 
1324 	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1325 
1326 	return false;
1327 }
1328 
1329 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1330 {
1331 	writel(en ? 1 : 0, vector->addr);
1332 }
1333 
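/* Vector 0 interrupt handler: mask the misc vector, service the mailbox if
 * the CMDQ RX event is pending, clear the event source and unmask again.
 */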
1334 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1335 {
1336 	struct hclgevf_dev *hdev = data;
1337 	u32 clearval;
1338 
1339 	hclgevf_enable_vector(&hdev->misc_vector, false);
1340 	if (!hclgevf_check_event_cause(hdev, &clearval))
1341 		goto skip_sched;
1342 
1343 	hclgevf_mbx_handler(hdev);
1344 
1345 	hclgevf_clear_event_cause(hdev, clearval);
1346 
1347 skip_sched:
1348 	hclgevf_enable_vector(&hdev->misc_vector, true);
1349 
1350 	return IRQ_HANDLED;
1351 }
1352 
1353 static int hclgevf_configure(struct hclgevf_dev *hdev)
1354 {
1355 	int ret;
1356 
1357 	/* get queue configuration from PF */
1358 	ret = hclge_get_queue_info(hdev);
1359 	if (ret)
1360 		return ret;
1361 	/* get tc configuration from PF */
1362 	return hclgevf_get_tc_info(hdev);
1363 }
1364 
1365 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1366 {
1367 	struct pci_dev *pdev = ae_dev->pdev;
1368 	struct hclgevf_dev *hdev = ae_dev->priv;
1369 
1370 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1371 	if (!hdev)
1372 		return -ENOMEM;
1373 
1374 	hdev->pdev = pdev;
1375 	hdev->ae_dev = ae_dev;
1376 	ae_dev->priv = hdev;
1377 
1378 	return 0;
1379 }
1380 
1381 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1382 {
1383 	struct hnae3_handle *roce = &hdev->roce;
1384 	struct hnae3_handle *nic = &hdev->nic;
1385 
1386 	roce->rinfo.num_vectors = hdev->num_roce_msix;
1387 
1388 	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1389 	    hdev->num_msi_left == 0)
1390 		return -EINVAL;
1391 
1392 	roce->rinfo.base_vector = hdev->roce_base_vector;
1393 
1394 	roce->rinfo.netdev = nic->kinfo.netdev;
1395 	roce->rinfo.roce_io_base = hdev->hw.io_base;
1396 
1397 	roce->pdev = nic->pdev;
1398 	roce->ae_algo = nic->ae_algo;
1399 	roce->numa_node_mask = nic->numa_node_mask;
1400 
1401 	return 0;
1402 }
1403 
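/* Program the initial RSS configuration: spread the indirection table
 * evenly across the queues and set the per-TC mode based on rss_size_max.
 */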
1404 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1405 {
1406 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1407 	int i, ret;
1408 
1409 	rss_cfg->rss_size = hdev->rss_size_max;
1410 
1411 	/* Initialize RSS indirect table for each vport */
1412 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1413 		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1414 
1415 	ret = hclgevf_set_rss_indir_table(hdev);
1416 	if (ret)
1417 		return ret;
1418 
1419 	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1420 }
1421 
1422 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1423 {
1424 	/* other vlan config (like VLAN TX/RX offload) would also be added
1425 	 * here later
1426 	 */
1427 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1428 				       false);
1429 }
1430 
1431 static int hclgevf_ae_start(struct hnae3_handle *handle)
1432 {
1433 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1434 	int i, queue_id;
1435 
1436 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
1437 		/* ring enable */
1438 		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1439 		if (queue_id < 0) {
1440 			dev_warn(&hdev->pdev->dev,
1441 				 "Get invalid queue id, ignore it\n");
1442 			continue;
1443 		}
1444 
1445 		hclgevf_tqp_enable(hdev, queue_id, 0, true);
1446 	}
1447 
1448 	/* reset tqp stats */
1449 	hclgevf_reset_tqp_stats(handle);
1450 
1451 	hclgevf_request_link_info(hdev);
1452 
1453 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1454 	mod_timer(&hdev->service_timer, jiffies + HZ);
1455 
1456 	return 0;
1457 }
1458 
1459 static void hclgevf_ae_stop(struct hnae3_handle *handle)
1460 {
1461 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1462 	int i, queue_id;
1463 
1464 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1465 
1466 	for (i = 0; i < hdev->num_tqps; i++) {
1467 		/* Ring disable */
1468 		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1469 		if (queue_id < 0) {
1470 			dev_warn(&hdev->pdev->dev,
1471 				 "Get invalid queue id, ignore it\n");
1472 			continue;
1473 		}
1474 
1475 		hclgevf_tqp_enable(hdev, queue_id, 0, false);
1476 	}
1477 
1478 	/* reset tqp stats */
1479 	hclgevf_reset_tqp_stats(handle);
1480 	del_timer_sync(&hdev->service_timer);
1481 	cancel_work_sync(&hdev->service_task);
1482 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1483 	hclgevf_update_link_status(hdev, 0);
1484 }
1485 
1486 static void hclgevf_state_init(struct hclgevf_dev *hdev)
1487 {
1488 	/* if this is an ongoing reset then skip this initialization */
1489 	if (hclgevf_dev_ongoing_reset(hdev))
1490 		return;
1491 
1492 	/* setup tasks for the MBX */
1493 	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1494 	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1495 	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1496 
1497 	/* setup tasks for service timer */
1498 	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1499 
1500 	INIT_WORK(&hdev->service_task, hclgevf_service_task);
1501 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1502 
1503 	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1504 
1505 	mutex_init(&hdev->mbx_resp.mbx_mutex);
1506 
1507 	/* bring the device down */
1508 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1509 }
1510 
1511 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1512 {
1513 	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1514 
1515 	if (hdev->service_timer.function)
1516 		del_timer_sync(&hdev->service_timer);
1517 	if (hdev->service_task.func)
1518 		cancel_work_sync(&hdev->service_task);
1519 	if (hdev->mbx_service_task.func)
1520 		cancel_work_sync(&hdev->mbx_service_task);
1521 	if (hdev->rst_service_task.func)
1522 		cancel_work_sync(&hdev->rst_service_task);
1523 
1524 	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1525 }
1526 
1527 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1528 {
1529 	struct pci_dev *pdev = hdev->pdev;
1530 	int vectors;
1531 	int i;
1532 
1533 	/* if this is an ongoing reset then skip this initialization */
1534 	if (hclgevf_dev_ongoing_reset(hdev))
1535 		return 0;
1536 
1537 	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
1538 		vectors = pci_alloc_irq_vectors(pdev,
1539 						hdev->roce_base_msix_offset + 1,
1540 						hdev->num_msi,
1541 						PCI_IRQ_MSIX);
1542 	else
1543 		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1544 						PCI_IRQ_MSI | PCI_IRQ_MSIX);
1545 
1546 	if (vectors < 0) {
1547 		dev_err(&pdev->dev,
1548 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1549 			vectors);
1550 		return vectors;
1551 	}
1552 	if (vectors < hdev->num_msi)
1553 		dev_warn(&hdev->pdev->dev,
1554 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1555 			 hdev->num_msi, vectors);
1556 
1557 	hdev->num_msi = vectors;
1558 	hdev->num_msi_left = vectors;
1559 	hdev->base_msi_vector = pdev->irq;
1560 	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
1561 
1562 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1563 					   sizeof(u16), GFP_KERNEL);
1564 	if (!hdev->vector_status) {
1565 		pci_free_irq_vectors(pdev);
1566 		return -ENOMEM;
1567 	}
1568 
1569 	for (i = 0; i < hdev->num_msi; i++)
1570 		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1571 
1572 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1573 					sizeof(int), GFP_KERNEL);
1574 	if (!hdev->vector_irq) {
1575 		pci_free_irq_vectors(pdev);
1576 		return -ENOMEM;
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1583 {
1584 	struct pci_dev *pdev = hdev->pdev;
1585 
1586 	pci_free_irq_vectors(pdev);
1587 }
1588 
1589 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1590 {
1591 	int ret = 0;
1592 
1593 	/* if this is an ongoing reset then skip this initialization */
1594 	if (hclgevf_dev_ongoing_reset(hdev))
1595 		return 0;
1596 
1597 	hclgevf_get_misc_vector(hdev);
1598 
1599 	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1600 			  0, "hclgevf_cmd", hdev);
1601 	if (ret) {
1602 		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1603 			hdev->misc_vector.vector_irq);
1604 		return ret;
1605 	}
1606 
1607 	hclgevf_clear_event_cause(hdev, 0);
1608 
1609 	/* enable misc. vector (vector 0) */
1610 	hclgevf_enable_vector(&hdev->misc_vector, true);
1611 
1612 	return ret;
1613 }
1614 
1615 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1616 {
1617 	/* disable misc vector (vector 0) */
1618 	hclgevf_enable_vector(&hdev->misc_vector, false);
1619 	synchronize_irq(hdev->misc_vector.vector_irq);
1620 	free_irq(hdev->misc_vector.vector_irq, hdev);
1621 	hclgevf_free_vector(hdev, 0);
1622 }
1623 
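/* Bind a client (KNIC, UNIC or RoCE) to this VF and initialize its
 * instance. The RoCE instance is only brought up once both the nic and
 * RoCE clients have registered and the device advertises RoCE support.
 */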
1624 static int hclgevf_init_client_instance(struct hnae3_client *client,
1625 					struct hnae3_ae_dev *ae_dev)
1626 {
1627 	struct hclgevf_dev *hdev = ae_dev->priv;
1628 	int ret;
1629 
1630 	switch (client->type) {
1631 	case HNAE3_CLIENT_KNIC:
1632 		hdev->nic_client = client;
1633 		hdev->nic.client = client;
1634 
1635 		ret = client->ops->init_instance(&hdev->nic);
1636 		if (ret)
1637 			goto clear_nic;
1638 
1639 		hnae3_set_client_init_flag(client, ae_dev, 1);
1640 
1641 		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1642 			struct hnae3_client *rc = hdev->roce_client;
1643 
1644 			ret = hclgevf_init_roce_base_info(hdev);
1645 			if (ret)
1646 				goto clear_roce;
1647 			ret = rc->ops->init_instance(&hdev->roce);
1648 			if (ret)
1649 				goto clear_roce;
1650 
1651 			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
1652 						   1);
1653 		}
1654 		break;
1655 	case HNAE3_CLIENT_UNIC:
1656 		hdev->nic_client = client;
1657 		hdev->nic.client = client;
1658 
1659 		ret = client->ops->init_instance(&hdev->nic);
1660 		if (ret)
1661 			goto clear_nic;
1662 
1663 		hnae3_set_client_init_flag(client, ae_dev, 1);
1664 		break;
1665 	case HNAE3_CLIENT_ROCE:
1666 		if (hnae3_dev_roce_supported(hdev)) {
1667 			hdev->roce_client = client;
1668 			hdev->roce.client = client;
1669 		}
1670 
1671 		if (hdev->roce_client && hdev->nic_client) {
1672 			ret = hclgevf_init_roce_base_info(hdev);
1673 			if (ret)
1674 				goto clear_roce;
1675 
1676 			ret = client->ops->init_instance(&hdev->roce);
1677 			if (ret)
1678 				goto clear_roce;
1679 		}
1680 
1681 		hnae3_set_client_init_flag(client, ae_dev, 1);
1682 	}
1683 
1684 	return 0;
1685 
1686 clear_nic:
1687 	hdev->nic_client = NULL;
1688 	hdev->nic.client = NULL;
1689 	return ret;
1690 clear_roce:
1691 	hdev->roce_client = NULL;
1692 	hdev->roce.client = NULL;
1693 	return ret;
1694 }
1695 
1696 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
1697 					   struct hnae3_ae_dev *ae_dev)
1698 {
1699 	struct hclgevf_dev *hdev = ae_dev->priv;
1700 
1701 	/* un-init roce, if it exists */
1702 	if (hdev->roce_client) {
1703 		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1704 		hdev->roce_client = NULL;
1705 		hdev->roce.client = NULL;
1706 	}
1707 
1708 	/* un-init nic/unic, if this was not called by roce client */
1709 	if (client->ops->uninit_instance && hdev->nic_client &&
1710 	    client->type != HNAE3_CLIENT_ROCE) {
1711 		client->ops->uninit_instance(&hdev->nic, 0);
1712 		hdev->nic_client = NULL;
1713 		hdev->nic.client = NULL;
1714 	}
1715 }
1716 
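/* Bring up the PCI side of the VF: enable the device, set a 64-bit DMA
 * mask, claim the BAR regions and map BAR 2, which holds the
 * configuration register space. Skipped entirely while a VF reset is in
 * progress, since the existing mapping remains valid in that case.
 */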
1717 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1718 {
1719 	struct pci_dev *pdev = hdev->pdev;
1720 	struct hclgevf_hw *hw;
1721 	int ret;
1722 
1723 	/* Check whether PCI initialization can be skipped. It can when the
1724 	 * device is undergoing a VF reset, since the PCI interface stays
1725 	 * valid across it; otherwise (no reset, or a full reset) the PCI
1726 	 * interface must be (re)initialized.
1727 	 */
1728 	if (hclgevf_dev_ongoing_reset(hdev))
1729 		return 0;
1730 
1731 	ret = pci_enable_device(pdev);
1732 	if (ret) {
1733 		dev_err(&pdev->dev, "failed to enable PCI device\n");
1734 		return ret;
1735 	}
1736 
1737 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1738 	if (ret) {
1739 		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
1740 		goto err_disable_device;
1741 	}
1742 
1743 	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1744 	if (ret) {
1745 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1746 		goto err_disable_device;
1747 	}
1748 
1749 	pci_set_master(pdev);
1750 	hw = &hdev->hw;
1751 	hw->hdev = hdev;
1752 	hw->io_base = pci_iomap(pdev, 2, 0);
1753 	if (!hw->io_base) {
1754 		dev_err(&pdev->dev, "can't map configuration register space\n");
1755 		ret = -ENOMEM;
1756 		goto err_clr_master;
1757 	}
1758 
1759 	return 0;
1760 
1761 err_clr_master:
1762 	pci_clear_master(pdev);
1763 	pci_release_regions(pdev);
1764 err_disable_device:
1765 	pci_disable_device(pdev);
1766 
1767 	return ret;
1768 }
1769 
1770 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1771 {
1772 	struct pci_dev *pdev = hdev->pdev;
1773 
1774 	pci_iounmap(pdev, hdev->hw.io_base);
1775 	pci_clear_master(pdev);
1776 	pci_release_regions(pdev);
1777 	pci_disable_device(pdev);
1778 }
1779 
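/* Ask the firmware how many MSI-X vectors this VF owns and, when RoCE
 * is supported, where the RoCE vectors start within that range.
 */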
1780 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
1781 {
1782 	struct hclgevf_query_res_cmd *req;
1783 	struct hclgevf_desc desc;
1784 	int ret;
1785 
1786 	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
1787 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1788 	if (ret) {
1789 		dev_err(&hdev->pdev->dev,
1790 			"query vf resource failed, ret = %d.\n", ret);
1791 		return ret;
1792 	}
1793 
1794 	req = (struct hclgevf_query_res_cmd *)desc.data;
1795 
1796 	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
1797 		hdev->roce_base_msix_offset =
1798 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
1799 				HCLGEVF_MSIX_OFT_ROCEE_M,
1800 				HCLGEVF_MSIX_OFT_ROCEE_S);
1801 		hdev->num_roce_msix =
1802 		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1803 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1804 
1805 		/* A VF has both NIC and RoCE vectors; the NIC vectors come
1806 		 * first and the RoCE vectors start at a fixed offset of 64.
1807 		 */
1808 		hdev->num_msi = hdev->num_roce_msix +
1809 				hdev->roce_base_msix_offset;
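		/* illustrative example: with the fixed offset of 64 and, say,
		 * 16 RoCE vectors reported by firmware, num_msi = 64 + 16 = 80
		 */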
1810 	} else {
1811 		hdev->num_msi =
1812 		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1813 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1814 	}
1815 
1816 	return 0;
1817 }
1818 
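/* Common init path for the VF device, used at probe time and reused
 * while recovering from a reset (some steps are skipped in that case):
 * set up PCI, the command queue and MSI/MSI-X, start the driver state
 * machine and misc. IRQ, then fetch the configuration from the PF and
 * initialize TQPs, the handle info, MTA, RSS and VLAN handling.
 */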
1819 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
1820 {
1821 	struct pci_dev *pdev = hdev->pdev;
1822 	int ret;
1823 
1824 	/* check if the device is undergoing a full reset (i.e. PCIe as well) */
1825 	if (hclgevf_dev_ongoing_full_reset(hdev)) {
1826 		dev_warn(&pdev->dev, "device is going full reset\n");
1827 		hclgevf_uninit_hdev(hdev);
1828 	}
1829 
1830 	ret = hclgevf_pci_init(hdev);
1831 	if (ret) {
1832 		dev_err(&pdev->dev, "PCI initialization failed\n");
1833 		return ret;
1834 	}
1835 
1836 	ret = hclgevf_cmd_init(hdev);
1837 	if (ret)
1838 		goto err_cmd_init;
1839 
1840 	/* Get vf resource */
1841 	ret = hclgevf_query_vf_resource(hdev);
1842 	if (ret) {
1843 		dev_err(&hdev->pdev->dev,
1844 			"Query vf status error, ret = %d.\n", ret);
1845 		goto err_query_vf;
1846 	}
1847 
1848 	ret = hclgevf_init_msi(hdev);
1849 	if (ret) {
1850 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1851 		goto err_query_vf;
1852 	}
1853 
1854 	hclgevf_state_init(hdev);
1855 
1856 	ret = hclgevf_misc_irq_init(hdev);
1857 	if (ret) {
1858 		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1859 			ret);
1860 		goto err_misc_irq_init;
1861 	}
1862 
1863 	ret = hclgevf_configure(hdev);
1864 	if (ret) {
1865 		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1866 		goto err_config;
1867 	}
1868 
1869 	ret = hclgevf_alloc_tqps(hdev);
1870 	if (ret) {
1871 		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1872 		goto err_config;
1873 	}
1874 
1875 	ret = hclgevf_set_handle_info(hdev);
1876 	if (ret) {
1877 		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1878 		goto err_config;
1879 	}
1880 
1881 	/* Initialize mta type for this VF */
1882 	ret = hclgevf_cfg_func_mta_type(hdev);
1883 	if (ret) {
1884 		dev_err(&hdev->pdev->dev,
1885 			"failed(%d) to initialize MTA type\n", ret);
1886 		goto err_config;
1887 	}
1888 
1889 	/* Initialize RSS for this VF */
1890 	ret = hclgevf_rss_init_hw(hdev);
1891 	if (ret) {
1892 		dev_err(&hdev->pdev->dev,
1893 			"failed(%d) to initialize RSS\n", ret);
1894 		goto err_config;
1895 	}
1896 
1897 	ret = hclgevf_init_vlan_config(hdev);
1898 	if (ret) {
1899 		dev_err(&hdev->pdev->dev,
1900 			"failed(%d) to initialize VLAN config\n", ret);
1901 		goto err_config;
1902 	}
1903 
1904 	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1905 
1906 	return 0;
1907 
1908 err_config:
1909 	hclgevf_misc_irq_uninit(hdev);
1910 err_misc_irq_init:
1911 	hclgevf_state_uninit(hdev);
1912 	hclgevf_uninit_msi(hdev);
1913 err_query_vf:
1914 	hclgevf_cmd_uninit(hdev);
1915 err_cmd_init:
1916 	hclgevf_pci_uninit(hdev);
1917 	return ret;
1918 }
1919 
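/* Undo hclgevf_init_hdev(): stop the state machine and service tasks,
 * release the misc. IRQ, shut down the command queue, and release the
 * MSI/MSI-X vectors and PCI resources.
 */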
1920 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
1921 {
1922 	hclgevf_state_uninit(hdev);
1923 	hclgevf_misc_irq_uninit(hdev);
1924 	hclgevf_cmd_uninit(hdev);
1925 	hclgevf_uninit_msi(hdev);
1926 	hclgevf_pci_uninit(hdev);
1927 }
1928 
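/* hnae3 framework entry point: allocate the hclgevf device bound to this
 * ae_dev and run the common hardware/driver initialization.
 */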
1929 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1930 {
1931 	struct pci_dev *pdev = ae_dev->pdev;
1932 	int ret;
1933 
1934 	ret = hclgevf_alloc_hdev(ae_dev);
1935 	if (ret) {
1936 		dev_err(&pdev->dev, "hclge device allocation failed\n");
1937 		return ret;
1938 	}
1939 
1940 	ret = hclgevf_init_hdev(ae_dev->priv);
1941 	if (ret)
1942 		dev_err(&pdev->dev, "hclge device initialization failed\n");
1943 
1944 	return ret;
1945 }
1946 
1947 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1948 {
1949 	struct hclgevf_dev *hdev = ae_dev->priv;
1950 
1951 	hclgevf_uninit_hdev(hdev);
1952 	ae_dev->priv = NULL;
1953 }
1954 
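/* The number of usable channels is bounded both by the maximum RSS size
 * the hardware supports and by how many TQPs are available per TC.
 */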
1955 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1956 {
1957 	struct hnae3_handle *nic = &hdev->nic;
1958 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1959 
1960 	return min_t(u32, hdev->rss_size_max,
1961 		     hdev->num_tqps / kinfo->num_tc);
1962 }
1963 
1964 /**
1965  * hclgevf_get_channels - Get the current channels enabled and max supported.
1966  * @handle: hardware information for network interface
1967  * @ch: ethtool channels structure
1968  *
1969  * We don't support separate tx and rx queues as channels. The other count
1970  * represents how many queues are being used for control. max_combined counts
1971  * how many queue pairs we can support. They may not be mapped 1 to 1 with
1972  * q_vectors since we support a lot more queue pairs than q_vectors.
1973  **/
1974 static void hclgevf_get_channels(struct hnae3_handle *handle,
1975 				 struct ethtool_channels *ch)
1976 {
1977 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1978 
1979 	ch->max_combined = hclgevf_get_max_channels(hdev);
1980 	ch->other_count = 0;
1981 	ch->max_other = 0;
1982 	ch->combined_count = handle->kinfo.rss_size;
1983 }
1984 
1985 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1986 					  u16 *free_tqps, u16 *max_rss_size)
1987 {
1988 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1989 
1990 	*free_tqps = 0;
1991 	*max_rss_size = hdev->rss_size_max;
1992 }
1993 
1994 static int hclgevf_get_status(struct hnae3_handle *handle)
1995 {
1996 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1997 
1998 	return hdev->hw.mac.link;
1999 }
2000 
2001 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
2002 					    u8 *auto_neg, u32 *speed,
2003 					    u8 *duplex)
2004 {
2005 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2006 
2007 	if (speed)
2008 		*speed = hdev->hw.mac.speed;
2009 	if (duplex)
2010 		*duplex = hdev->hw.mac.duplex;
2011 	if (auto_neg)
2012 		*auto_neg = AUTONEG_DISABLE;
2013 }
2014 
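/* Not static: expected to be called from outside this file (e.g. the VF
 * mailbox handling path) when the PF reports the current link speed and
 * duplex; it only caches the values for later ethtool queries.
 */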
2015 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
2016 				 u8 duplex)
2017 {
2018 	hdev->hw.mac.speed = speed;
2019 	hdev->hw.mac.duplex = duplex;
2020 }
2021 
2022 static const struct hnae3_ae_ops hclgevf_ops = {
2023 	.init_ae_dev = hclgevf_init_ae_dev,
2024 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
2025 	.init_client_instance = hclgevf_init_client_instance,
2026 	.uninit_client_instance = hclgevf_uninit_client_instance,
2027 	.start = hclgevf_ae_start,
2028 	.stop = hclgevf_ae_stop,
2029 	.map_ring_to_vector = hclgevf_map_ring_to_vector,
2030 	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
2031 	.get_vector = hclgevf_get_vector,
2032 	.put_vector = hclgevf_put_vector,
2033 	.reset_queue = hclgevf_reset_tqp,
2034 	.set_promisc_mode = hclgevf_set_promisc_mode,
2035 	.get_mac_addr = hclgevf_get_mac_addr,
2036 	.set_mac_addr = hclgevf_set_mac_addr,
2037 	.add_uc_addr = hclgevf_add_uc_addr,
2038 	.rm_uc_addr = hclgevf_rm_uc_addr,
2039 	.add_mc_addr = hclgevf_add_mc_addr,
2040 	.rm_mc_addr = hclgevf_rm_mc_addr,
2041 	.update_mta_status = hclgevf_update_mta_status,
2042 	.get_stats = hclgevf_get_stats,
2043 	.update_stats = hclgevf_update_stats,
2044 	.get_strings = hclgevf_get_strings,
2045 	.get_sset_count = hclgevf_get_sset_count,
2046 	.get_rss_key_size = hclgevf_get_rss_key_size,
2047 	.get_rss_indir_size = hclgevf_get_rss_indir_size,
2048 	.get_rss = hclgevf_get_rss,
2049 	.set_rss = hclgevf_set_rss,
2050 	.get_tc_size = hclgevf_get_tc_size,
2051 	.get_fw_version = hclgevf_get_fw_version,
2052 	.set_vlan_filter = hclgevf_set_vlan_filter,
2053 	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
2054 	.reset_event = hclgevf_reset_event,
2055 	.get_channels = hclgevf_get_channels,
2056 	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
2057 	.get_status = hclgevf_get_status,
2058 	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
2059 };
2060 
2061 static struct hnae3_ae_algo ae_algovf = {
2062 	.ops = &hclgevf_ops,
2063 	.pdev_id_table = ae_algovf_pci_tbl,
2064 };
2065 
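/* Module init/exit simply register and unregister the VF algo with the
 * hnae3 framework, which then binds it to devices matching
 * ae_algovf_pci_tbl.
 */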
2066 static int hclgevf_init(void)
2067 {
2068 	pr_info("%s is initializing\n", HCLGEVF_NAME);
2069 
2070 	hnae3_register_ae_algo(&ae_algovf);
2071 
2072 	return 0;
2073 }
2074 
2075 static void hclgevf_exit(void)
2076 {
2077 	hnae3_unregister_ae_algo(&ae_algovf);
2078 }
2079 module_init(hclgevf_init);
2080 module_exit(hclgevf_exit);
2081 
2082 MODULE_LICENSE("GPL");
2083 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2084 MODULE_DESCRIPTION("HCLGEVF Driver");
2085 MODULE_VERSION(HCLGEVF_MOD_VERSION);
2086