// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
7 #include "hclge_dcb.h"
8 #include "hnae3.h"
9
10 #define BW_PERCENT 100
11
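/* Map the dcbnl ieee_ets configuration onto the driver's TM (traffic
 * manager) state: strict-priority TCs get SP scheduling with a zero DWRR
 * weight, ETS TCs get DWRR scheduling with the requested bandwidth share.
 */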
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms; if we receive any other value
			 * from dcbnl, return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

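/* Fill a struct ieee_ets from the current TM state for the dcbnl
 * ieee_getets query: per-priority TC mapping, per-TC bandwidth and
 * the scheduling algorithm (SP or ETS) of each TC.
 */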
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

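/* Sanity checks shared by the ETS and mqprio paths: the requested TC
 * count must not exceed the hardware maximum, every user priority must
 * map to a valid TC, and enough TQPs must be allocated to back the
 * requested number of TCs.
 */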
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

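/* Validate a dcbnl ETS request and derive the resulting TC count.
 * *changed is set when the new mapping differs from the current TM
 * state; each ETS TC must carry a non-zero weight and, if any ETS TC
 * exists, the weights must sum to exactly BW_PERCENT (100).
 */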
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	int ret;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
	if (ret)
		return ret;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			/* The hardware will switch to SP mode if the
			 * bandwidth is 0, so the ETS bandwidth must be
			 * greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

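/* Re-program the hardware after a TC map change: scheduler setup,
 * pause configuration, buffer reallocation, and an RSS re-init so the
 * indirection table matches the new queue layout.
 */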
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_rss_indir_init_cfg(hdev);

	return hclge_rss_init_hw(hdev);
}

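/* Propagate the new TC count to every registered client (one per
 * vport, including the VMDq vports) via its setup_tc callback.
 */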
static int hclge_client_setup_tc(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_client *client;
	struct hnae3_handle *handle;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		handle = &vport[i].nic;
		client = handle->client;

		if (!client || !client->ops || !client->ops->setup_tc)
			continue;

		ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
		if (ret)
			return ret;
	}

	return 0;
}

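/* Quiesce the client before a disruptive reconfiguration: bring it
 * down, then uninitialize it.
 */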
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

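/* Counterpart of hclge_notify_down_uinit(): reinitialize the client
 * and bring it back up.
 */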
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

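/* dcbnl ieee_setets hook: validate the request, tear the client down
 * if the TC map changes, update TM state and re-program the hardware.
 * Rejected when IEEE DCBX is not enabled or mqprio already owns the TC
 * configuration. Typically reached through the DCB netlink layer, e.g.
 * via iproute2's dcb utility (illustrative only; exact syntax may
 * differ):
 *	dcb ets set dev eth0 tc-tsa 0:ets 1:strict tc-bw 0:100
 */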
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	if (num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_init_up(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

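/* dcbnl ieee_getpfc hook: report the PFC capability, the enabled
 * priority bitmap, and the per-TC PFC frame counters (tx requests and
 * rx indications) read from hardware.
 */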
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_pfc_tx_stats_get(hdev, requests);
	if (ret)
		return ret;

	ret = hclge_pfc_rx_stats_get(hdev, indications);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
	}
	return 0;
}

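/* dcbnl ieee_setpfc hook: translate the per-priority pfc_en bitmap
 * into a per-TC map (a TC is PFC-enabled when any priority mapped to
 * it is enabled), then re-program pause settings and buffers with the
 * client held down.
 */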
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

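/* dcbnl setdcbx hook: accept only host-managed IEEE DCBX. Returns
 * non-zero (the dcbnl convention for rejecting a mode) for LLD_MANAGED
 * or CEE requests.
 */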
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
	if (ret)
		return -EINVAL;

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev, false);
	if (ret)
		goto err_out;

	ret = hclge_client_setup_tc(hdev);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	hclge_notify_init_up(hdev);

	return ret;
}

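/* dcbnl-facing ops table; hns3 forwards the kernel's DCB callbacks to
 * these handlers via kinfo->dcb_ops (set in hclge_dcb_ops_set() below).
 */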
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not a PF,
	 * then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}