// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT 100
9
hclge_ieee_ets_to_tm_info(struct hclge_dev * hdev,struct ieee_ets * ets)10 static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
11 struct ieee_ets *ets)
12 {
13 u8 i;
14
15 for (i = 0; i < HNAE3_MAX_TC; i++) {
16 switch (ets->tc_tsa[i]) {
17 case IEEE_8021QAZ_TSA_STRICT:
18 hdev->tm_info.tc_info[i].tc_sch_mode =
19 HCLGE_SCH_MODE_SP;
20 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
21 break;
22 case IEEE_8021QAZ_TSA_ETS:
23 hdev->tm_info.tc_info[i].tc_sch_mode =
24 HCLGE_SCH_MODE_DWRR;
25 hdev->tm_info.pg_info[0].tc_dwrr[i] =
26 ets->tc_tx_bw[i];
27 break;
28 default:
29 /* Hardware only supports SP (strict priority)
30 * or ETS (enhanced transmission selection)
31 * algorithms, if we receive some other value
32 * from dcbnl, then throw an error.
33 */
34 return -EINVAL;
35 }
36 }
37
38 return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
39 }
40
hclge_tm_info_to_ieee_ets(struct hclge_dev * hdev,struct ieee_ets * ets)41 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
42 struct ieee_ets *ets)
43 {
44 u32 i;
45
46 memset(ets, 0, sizeof(*ets));
47 ets->willing = 1;
48 ets->ets_cap = hdev->tc_max;
49
50 for (i = 0; i < HNAE3_MAX_TC; i++) {
51 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
52 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
53
54 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
55 HCLGE_SCH_MODE_SP)
56 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
57 else
58 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
59 }
60 }
61
/* IEEE std */
hclge_ieee_getets(struct hnae3_handle * h,struct ieee_ets * ets)63 static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
64 {
65 struct hclge_vport *vport = hclge_get_vport(h);
66 struct hclge_dev *hdev = vport->back;
67
68 hclge_tm_info_to_ieee_ets(hdev, ets);
69
70 return 0;
71 }
72
/* Validate a user-supplied ETS configuration against device limits.
 * On success, *tc is set to the TC count implied by the prio->TC map and
 * *changed is set when the new configuration differs from the active one.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		/* Both the priority index and its mapped TC must fall
		 * inside the range the hardware supports.
		 */
		if (ets->prio_tc[i] >= hdev->tc_max || i >= hdev->tc_max)
			return -EINVAL;

		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			    HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	/* All ETS-mode TCs together must claim exactly 100% bandwidth. */
	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}
120
hclge_map_update(struct hnae3_handle * h)121 static int hclge_map_update(struct hnae3_handle *h)
122 {
123 struct hclge_vport *vport = hclge_get_vport(h);
124 struct hclge_dev *hdev = vport->back;
125 int ret;
126
127 ret = hclge_tm_map_cfg(hdev);
128 if (ret)
129 return ret;
130
131 ret = hclge_tm_schd_mode_hw(hdev);
132 if (ret)
133 return ret;
134
135 ret = hclge_pause_setup_hw(hdev);
136 if (ret)
137 return ret;
138
139 ret = hclge_buffer_alloc(hdev);
140 if (ret)
141 return ret;
142
143 hclge_rss_indir_init_cfg(hdev);
144
145 return hclge_rss_init_hw(hdev);
146 }
147
hclge_client_setup_tc(struct hclge_dev * hdev)148 static int hclge_client_setup_tc(struct hclge_dev *hdev)
149 {
150 struct hclge_vport *vport = hdev->vport;
151 struct hnae3_client *client;
152 struct hnae3_handle *handle;
153 int ret;
154 u32 i;
155
156 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
157 handle = &vport[i].nic;
158 client = handle->client;
159
160 if (!client || !client->ops || !client->ops->setup_tc)
161 continue;
162
163 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
164 if (ret)
165 return ret;
166 }
167
168 return 0;
169 }
170
hclge_ieee_setets(struct hnae3_handle * h,struct ieee_ets * ets)171 static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
172 {
173 struct hclge_vport *vport = hclge_get_vport(h);
174 struct hclge_dev *hdev = vport->back;
175 bool map_changed = false;
176 u8 num_tc = 0;
177 int ret;
178
179 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
180 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
181 return -EINVAL;
182
183 ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
184 if (ret)
185 return ret;
186
187 hclge_tm_schd_info_update(hdev, num_tc);
188
189 ret = hclge_ieee_ets_to_tm_info(hdev, ets);
190 if (ret)
191 return ret;
192
193 if (map_changed) {
194 ret = hclge_client_setup_tc(hdev);
195 if (ret)
196 return ret;
197 }
198
199 return hclge_tm_dwrr_cfg(hdev);
200 }
201
hclge_ieee_getpfc(struct hnae3_handle * h,struct ieee_pfc * pfc)202 static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
203 {
204 u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
205 struct hclge_vport *vport = hclge_get_vport(h);
206 struct hclge_dev *hdev = vport->back;
207 u8 i, j, pfc_map, *prio_tc;
208 int ret;
209
210 memset(pfc, 0, sizeof(*pfc));
211 pfc->pfc_cap = hdev->pfc_max;
212 prio_tc = hdev->tm_info.prio_tc;
213 pfc_map = hdev->tm_info.hw_pfc_map;
214
215 /* Pfc setting is based on TC */
216 for (i = 0; i < hdev->tm_info.num_tc; i++) {
217 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
218 if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
219 pfc->pfc_en |= BIT(j);
220 }
221 }
222
223 ret = hclge_pfc_tx_stats_get(hdev, requests);
224 if (ret)
225 return ret;
226
227 ret = hclge_pfc_rx_stats_get(hdev, indications);
228 if (ret)
229 return ret;
230
231 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
232 pfc->requests[i] = requests[i];
233 pfc->indications[i] = indications[i];
234 }
235 return 0;
236 }
237
hclge_ieee_setpfc(struct hnae3_handle * h,struct ieee_pfc * pfc)238 static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
239 {
240 struct hclge_vport *vport = hclge_get_vport(h);
241 struct hclge_dev *hdev = vport->back;
242 u8 i, j, pfc_map, *prio_tc;
243
244 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
245 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
246 return -EINVAL;
247
248 if (pfc->pfc_en == hdev->tm_info.pfc_en)
249 return 0;
250
251 prio_tc = hdev->tm_info.prio_tc;
252 pfc_map = 0;
253
254 for (i = 0; i < hdev->tm_info.num_tc; i++) {
255 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
256 if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
257 pfc_map |= BIT(i);
258 break;
259 }
260 }
261 }
262
263 hdev->tm_info.hw_pfc_map = pfc_map;
264 hdev->tm_info.pfc_en = pfc->pfc_en;
265
266 return hclge_pause_setup_hw(hdev);
267 }
268
/* DCBX configuration */
hclge_getdcbx(struct hnae3_handle * h)270 static u8 hclge_getdcbx(struct hnae3_handle *h)
271 {
272 struct hclge_vport *vport = hclge_get_vport(h);
273 struct hclge_dev *hdev = vport->back;
274
275 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
276 return 0;
277
278 return hdev->dcbx_cap;
279 }
280
/* dcbnl setdcbx hook. Only host-managed IEEE DCBX is supported; a
 * non-zero return tells dcbnl the requested mode was refused.
 */
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_dev *hdev = hclge_get_vport(h)->back;

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}
296
/* Set up TC for hardware offloaded mqprio in channel mode */
/* hnae3 setup_tc hook: configure TCs for hardware-offloaded mqprio. */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_dev *hdev = hclge_get_vport(h)->back;
	int ret;

	/* mqprio and DCB may not be enabled at the same time. */
	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	if (tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"setup tc failed, tc(%u) > tc_max(%u)\n",
			tc, hdev->tc_max);
		return -EINVAL;
	}

	hclge_tm_schd_info_update(hdev, tc);

	ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
	if (ret)
		return ret;

	ret = hclge_tm_init_hw(hdev);
	if (ret)
		return ret;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	/* mqprio is only meaningful with more than one TC */
	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return 0;
}
333
334 static const struct hnae3_dcb_ops hns3_dcb_ops = {
335 .ieee_getets = hclge_ieee_getets,
336 .ieee_setets = hclge_ieee_setets,
337 .ieee_getpfc = hclge_ieee_getpfc,
338 .ieee_setpfc = hclge_ieee_setpfc,
339 .getdcbx = hclge_getdcbx,
340 .setdcbx = hclge_setdcbx,
341 .map_update = hclge_map_update,
342 .setup_tc = hclge_setup_tc,
343 };
344
hclge_dcb_ops_set(struct hclge_dev * hdev)345 void hclge_dcb_ops_set(struct hclge_dev *hdev)
346 {
347 struct hclge_vport *vport = hdev->vport;
348 struct hnae3_knic_private_info *kinfo;
349
350 /* Hdev does not support DCB or vport is
351 * not a pf, then dcb_ops is not set.
352 */
353 if (!hnae3_dev_dcb_supported(hdev) ||
354 vport->vport_id != 0)
355 return;
356
357 kinfo = &vport->nic.kinfo;
358 kinfo->dcb_ops = &hns3_dcb_ops;
359 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
360 }
361