// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

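/* Map the dcbnl ieee_ets TSA configuration onto the driver's TM info:
 * strict-priority TCs get SP mode with a zero DWRR weight, ETS TCs get
 * DWRR mode with the requested bandwidth share.
 */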
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* The hardware only supports SP (strict priority)
			 * and ETS (enhanced transmission selection)
			 * scheduling; if dcbnl hands us any other value,
			 * return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

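/* Fill an ieee_ets structure from the driver's current TM state so
 * that dcbnl can report it back to user space.
 */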
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		if (i < hdev->tm_info.num_tc)
			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
		else
			ets->tc_tx_bw[i] = 0;

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
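/* ieee_getets callback: report the current ETS configuration */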
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

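/* Sanity-check a requested TC count and prio-to-TC map against the
 * device limits: the TC count may not exceed tc_max or the number of
 * TQPs allocated to the PF, and no priority may map to a disabled TC.
 */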
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

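/* Compare the requested prio-to-TC map with the current one, flag any
 * change, and derive the number of TCs from the highest TC id in use.
 */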
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* Return the number of TCs: the highest TC id plus one */
	return max_tc_id + 1;
}

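/* Validate the per-TC scheduling modes requested via dcbnl: ETS
 * bandwidth may only be set on enabled TCs, each ETS TC needs a
 * non-zero share, and all ETS shares must sum to exactly 100 percent.
 * Also flags whether any TC's scheduling mode differs from the
 * current configuration.
 */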
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed,
				       u8 tc_num)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (i >= tc_num) {
				dev_err(&hdev->pdev->dev,
					"tc%u is disabled, cannot set ets bw\n",
					i);
				return -EINVAL;
			}

			/* The hardware falls back to SP mode when a TC's
			 * bandwidth is 0, so each ETS TC's bandwidth must
			 * be greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

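/* Run all ETS validation steps for a dcbnl ieee_setets request and
 * report the resulting TC count and whether the TC mapping changed.
 */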
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

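/* Reprogram the hardware after the TC map has changed: the scheduler,
 * pause configuration, buffers, and RSS all need to be set up again to
 * match the new queue layout.
 */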
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

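/* Propagate the new TC count to every vport's client that has
 * registered a setup_tc callback.
 */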
static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

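/* Take the client down and uninitialize it before a reconfiguration
 * that changes the queue/TC layout.
 */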
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

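/* Reinitialize the client and bring it back up once reconfiguration
 * is complete.
 */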
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

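/* ieee_setets callback: validate the requested ETS configuration,
 * tear the client down if the TC mapping changes, apply the new
 * scheduling and bandwidth settings, and bring the client back up.
 */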
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	if (num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_init_up(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

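/* ieee_getpfc callback: report the PFC capability, the enabled
 * priorities, and the per-TC PFC frame counters.
 */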
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}

	return 0;
}

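/* ieee_setpfc callback: translate the per-priority PFC enable bitmap
 * into a per-TC map, update the pause and buffer configuration in
 * hardware, and restart the client so the new buffers take effect.
 */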
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
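/* getdcbx callback: report no DCBX capability while mqprio is active,
 * since mqprio and DCB are mutually exclusive here.
 */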
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

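/* setdcbx callback: accept only host-managed IEEE mode; a non-zero
 * return tells dcbnl the requested mode is not supported.
 */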
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
	if (ret)
		return -EINVAL;

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev, false);
	if (ret)
		goto err_out;

	ret = hclge_client_setup_tc(hdev);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

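/* Register the DCB operations on the PF's NIC handle and default to
 * host-managed IEEE DCBX.
 */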
void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If the device does not support DCB or the vport is not
	 * the PF, dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}