// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/bitfield.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "qos.h"

#define OTX2_QOS_QID_INNER		0xFFFFU
#define OTX2_QOS_QID_NONE		0xFFFEU
#define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
#define OTX2_QOS_CLASS_NONE		0
#define OTX2_QOS_DEFAULT_PRIO		0xF
#define OTX2_QOS_INVALID_SQ		0xFFFF
#define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
#define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
#define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)

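/* HTB offload glue: each HTB class is mirrored as an otx2_qos_node and
 * mapped onto the NIX transmit scheduler hierarchy (TL1 -> TL2 -> TL3 ->
 * TL4 -> MDQ/SMQ). Two sentinel qids are used: OTX2_QOS_QID_INNER marks an
 * inner (non-leaf) class that owns no send queue of its own, and
 * OTX2_QOS_QID_NONE marks the implicit txschq nodes created below a leaf.
 */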
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed to set number of Tx queues: %d\n", tx_queues);
		return;
	}
}

static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
				 struct nix_txschq_config *cfg,
				 int index)
{
	if (node->level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
		cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL4) {
		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL3) {
		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL2) {
		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq);
	}
}

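/* Convert an HTB quantum in bytes to a hardware DWRR weight: the quantum is
 * divided by the scheduler's DWRR MTU, rounding up. For example, with
 * dwrr_mtu = 1500 a quantum of 4000 bytes yields a weight of 3.
 */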
static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
{
	u32 weight;

	weight = quantum / pfvf->hw.dwrr_mtu;
	if (quantum % pfvf->hw.dwrr_mtu)
		weight += 1;

	return weight;
}

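/* Fill PARENT/SCHEDULE/PIR/CIR register values for one txschq node. PARENT
 * takes the parent schq at bit 16; SCHEDULE takes the scheduling priority
 * at bit 24 and the DWRR weight in the low bits. Internal nodes
 * (qid == OTX2_QOS_QID_NONE) get a default MTU-based weight, static
 * children use their schq offset from the parent's prio_anchor, and DWRR
 * children share the parent's single DWRR priority plus a quantum-derived
 * weight. PIR is programmed to max(rate, ceil); CIR only where the
 * hardware supports CIR and PIR together.
 */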
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when CIR and PIR are not both supported.
	 * On 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}

static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
						mtu_to_dwrr_weight(pfvf,
								   pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}

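/* Program the parent's TOPOLOGY register: the first child schq
 * (prio_anchor) sits at bit 32 and the round-robin priority at bit 1.
 * MDQ is the bottom of the hierarchy, so it has no child topology to set.
 */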
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			   parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}

static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}

static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}

static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}

static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}

static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}

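/* Count the txschq resources a subtree needs. cfg->schq[] accumulates
 * per-level queues for the implicit leaf chains, while cfg->schq_contig[]
 * requests one contiguous block per parent sized to cover all DWRR
 * children plus every static priority slot up to max_static_prio.
 */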
static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit scheduler queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}

static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}

static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}

static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

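/* Allocate the software root node backing the HTB root qdisc. On a PF the
 * root maps to TL1 and its DWRR priority comes from the AF
 * (txschq_aggr_lvl_rr_prio); on a VF the hierarchy starts at TL2 with the
 * default DWRR priority.
 */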
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}

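/* Insert a child keeping the sibling list sorted by priority. Two static
 * children may not share a priority (-EEXIST); DWRR children all share one
 * priority and are validated separately in otx2_qos_validate_dwrr_cfg().
 */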
static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		if (tmp_node->prio > node->prio) {
			list_add_tail(&node->list, tmp);
			return 0;
		}
	}

	list_add_tail(&node->list, head);
	return 0;
}

static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}

static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

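/* Resolve an HTB classid to its netdev transmit queue index for the
 * transmit path (QoS send queues follow the regular ones, hence the
 * hw.tx_queues offset). Inner classes own no queue and yield -EINVAL.
 */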
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
{
	struct otx2_qos_node *node;
	u16 qid;
	int res;

	node = otx2_sw_node_find_rcu(pfvf, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == OTX2_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = pfvf->hw.tx_queues + qid;
out:
	return res;
}

static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

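/* Request txschq resources from the AF over the mailbox: per-level counts
 * of standalone and contiguous queues go out, and the response carries the
 * allocated schq lists along with the level at which link config applies
 * and the aggregate-level round-robin priority.
 */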
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}

static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
					struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (!cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}

static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}

static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

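/* Pick a slot in the parent's contiguous schq block for one child: static
 * children claim the index equal to their priority (1:1 mapping), after
 * which DWRR children take the first free index at or above their
 * priority.
 */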
static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		}
	}
}

static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}

static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}

static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}

static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}

static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}

static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}

static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
			schq = cfg->schq_list[lvl][idx];
			otx2_txschq_free_one(pfvf, lvl, schq);
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

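/* The helpers below walk the tree and, per MDQ leaf, either flush the
 * hardware SMQ (QOS_SMQ_FLUSH, e.g. while cleaning up queues) or bind and
 * enable the QoS send queue mapped to its qid (QOS_CFG_SQ).
 */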
static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_enable_sq(pfvf, qid);
}

static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}

static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}

static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}

static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}

static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		return err;
	}

	/* allocate txschq queue */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}

static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");

	/* find root node */
	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return -ENOENT;

	/* free the hw mappings */
	otx2_qos_destroy_node(pfvf, root);

	return 0;
}

static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
{
	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
	int err = 0;

	/* The maximum round-robin weight supported by OcteonTX2 and CN10K
	 * differs; validate accordingly.
	 */
	if (is_dev_otx2(pfvf->pdev))
		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
	else if (rr_weight > CN10K_MAX_RR_WEIGHT)
		err = -EINVAL;

	return err;
}

static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
				      struct netlink_ext_ack *extack,
				      struct otx2_nic *pfvf,
				      u64 prio, u64 quantum)
{
	int err;

	err = otx2_qos_validate_quantum(pfvf, quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
		return err;
	}

	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = prio;
	} else if (prio != parent->child_dwrr_prio) {
		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
					   struct netlink_ext_ack *extack,
					   struct otx2_nic *pfvf,
					   u64 prio, bool static_cfg)
{
	if (prio == parent->child_dwrr_prio && static_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
		return -EEXIST;
	}

	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Static priority child with same priority exists");
		return -EEXIST;
	}

	return 0;
}

static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
{
	/* For PF, root node dwrr priority is static */
	if (parent->level == NIX_TXSCH_LVL_TL1)
		return;

	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		clear_bit(prio, parent->prio_bmap);
	}
}

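/* A parent may have at most one DWRR group. If a new child arrives with the
 * same priority as an existing static sibling, that sibling is converted to
 * DWRR (provided its quantum is representable as a hardware weight) and the
 * caller treats the new child as DWRR too.
 */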
static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					   node->classid, node->quantum,
					   node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
				     u32 parent_classid, u64 rate, u64 ceil,
				     u64 prio, u32 quantum,
				     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *parent;
	int qid, ret, err;
	bool static_cfg;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
		   classid, parent_classid, rate, ceil, prio, quantum);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* get parent node */
	parent = otx2_sw_node_find(pfvf, parent_classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		ret = -ENOENT;
		goto out;
	}
	if (parent->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
					      static_cfg);
	if (ret)
		goto out;

	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		parent->child_static_cnt++;
	else
		parent->child_dwrr_cnt++;

	set_bit(prio, parent->prio_bmap);

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);

	/* allocate a new sq */
	qid = otx2_qos_get_qid(pfvf);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QoS SQs");
		ret = -ENOMEM;
		goto free_old_cfg;
	}

	/* Actual SQ mapping will be updated after SMQ alloc */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* allocate and initialize a new child node */
	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
					    ceil, quantum, qid, static_cfg);
	if (IS_ERR(node)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(node);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, node);
		/* restore the old qos tree */
		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}

		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return pfvf->hw.tx_queues + qid;

free_node:
	otx2_qos_sw_node_delete(pfvf, node);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		parent->child_static_cnt--;
	else
		parent->child_dwrr_cnt--;

	clear_bit(prio, parent->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
				  u32 quantum, struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *child;
	bool static_cfg;
	int ret, err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
		   classid, child_classid, rate, ceil);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		ret = -ENOENT;
		goto out;
	}
	/* check max qos txschq level */
	if (node->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		node->child_static_cnt++;
	else
		node->child_dwrr_cnt++;

	set_bit(prio, node->prio_bmap);

	/* store the qid to assign to leaf node */
	qid = node->qid;

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);

	/* delete the txschq nodes allocated for this node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_free_hw_node_schq(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* mark this node as htb inner node */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);

	/* allocate and initialize a new child node */
	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
					     prio, rate, ceil, quantum,
					     qid, static_cfg);
	if (IS_ERR(child)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(child);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, child);
		/* restore the old qos tree */
		WRITE_ONCE(node->qid, qid);
		err = otx2_qos_alloc_txschq_node(pfvf, node);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore old leaf node");
			goto free_old_cfg;
		}
		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}
		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return 0;

free_node:
	otx2_qos_sw_node_delete(pfvf, child);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		node->child_static_cnt--;
	else
		node->child_dwrr_cnt--;
	clear_bit(prio, node->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	bool dwrr_del_node = false;
	u64 prio;
	u16 qid;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio = node->prio;
	qid = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	return 0;
}

static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	bool dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	/* destroy the leaf node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}

void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	if (root->level != NIX_TXSCH_LVL_TL1) {
		err = otx2_qos_txschq_config(pfvf, root);
		if (err) {
			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
			goto root_destroy;
		}
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update scheduler/shaping config in hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

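/* HTB offload entry point (TC_SETUP_QDISC_HTB). Illustrative userspace
 * usage, with the interface name and values being examples only:
 *
 *   tc qdisc replace dev eth0 root handle 1: htb offload
 *   tc class add dev eth0 parent 1: classid 1:1 htb \
 *      rate 100mbit ceil 200mbit prio 1 quantum 1500
 */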
int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->quantum,
						htb->extack);
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->quantum, htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
					      htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}