// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

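/* Translate the ETS configuration requested via dcbnl into the driver's
 * internal TM state: strict-priority TCs get SP scheduling with a zero
 * DWRR weight, ETS TCs get DWRR with the requested bandwidth share.
 */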
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms; if we receive some other value
			 * from dcbnl, throw an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

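/* Fill an ieee_ets structure from the current TM state so it can be
 * reported back to dcbnl.
 */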
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		if (i < hdev->tm_info.num_tc)
			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
		else
			ets->tc_tx_bw[i] = 0;

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

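/* Validate a requested TC count and prio-to-TC map against the device's
 * TC limit and the number of TQPs allocated to the PF.
 */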
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

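/* Compute how many TCs the new prio-to-TC map requires and flag whether
 * the map differs from the one currently programmed.
 */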
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* The number of TCs needed is the highest TC id plus 1 */
	return max_tc_id + 1;
}

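/* Check the per-TC scheduling algorithms: an ETS TC must be enabled and
 * have a non-zero bandwidth, and all ETS bandwidths must sum to 100%.
 * Also flag whether any TC changes its scheduling mode.
 */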
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed,
				       u8 tc_num)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (i >= tc_num) {
				dev_err(&hdev->pdev->dev,
					"tc%u is disabled, cannot set ets bw\n",
					i);
				return -EINVAL;
			}

			/* The hardware will switch to SP mode if the
			 * bandwidth is 0, so the ETS bandwidth must be
			 * greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

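/* Validate a complete ETS request and report the required TC count and
 * whether the prio-to-TC map or any scheduling mode changed.
 */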
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

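/* Reprogram the scheduler, pause parameters, packet buffers and RSS
 * after the TC mapping has changed.
 */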
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

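/* Bring the client down and uninitialize it before reconfiguration */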
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

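/* Reinitialize the client and bring it back up after reconfiguration */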
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

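/* dcbnl ieee_setets hook: validate and apply an ETS configuration,
 * doing a full client down/up cycle only when the TC mapping changes.
 */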
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    h->kinfo.tc_info.mqprio_active)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		return hclge_notify_init_up(hdev);
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

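/* dcbnl ieee_getpfc hook: report the PFC capability, the enabled
 * priorities and the per-priority pause frame statistics.
 */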
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_mac_update_stats(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update MAC stats, ret = %d.\n", ret);
		return ret;
	}

	hclge_pfc_tx_stats_get(hdev, pfc->requests);
	hclge_pfc_rx_stats_get(hdev, pfc->indications);

	return 0;
}

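/* dcbnl ieee_setpfc hook: derive the per-TC PFC map from the
 * per-priority enable bitmap, then reprogram pause and buffers.
 */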
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (h->kinfo.tc_info.mqprio_active)
		return 0;

	return hdev->dcbx_cap;
}

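/* dcbnl setdcbx hook: accept only host-managed, non-CEE modes */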
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

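/* Validate mqprio parameters: each TC's queue count must be a power of
 * 2 within the PF RSS size, offsets must be contiguous from 0, TX rate
 * limiting is not supported, and the total queue count must fit within
 * the TQPs allocated to the PF.
 */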
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offsets must start at 0 and be contiguous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}
	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be no more than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

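/* Copy the mqprio queue/TC layout into the hnae3 TC info */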
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));
}

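/* Apply the TC scheduling info and prio-to-TC map, then reprogram the
 * hardware mapping.
 */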
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* If the client is unregistered, changing the mqprio
	 * configuration is not allowed, as uninitializing the
	 * rings may fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	kinfo = &vport->nic.kinfo;
	if (kinfo->tc_info.dcb_ets_active)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, it will take effect after reset, ret = %d\n",
			 ret);
	} else {
		/* roll-back */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not
	 * a PF, dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}