// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child) - 1)
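
/* Illustrative mapping, derived from the macros above: with
 * IEEE_8021QAZ_MAX_TCS == 8, PRIO/ETS band 0 maps to traffic class 7 and
 * band 7 to traffic class 0, i.e. the highest-priority band rides on the
 * numerically highest tclass. Child class minors are 1-based, so child 1
 * likewise maps to tclass 7.
 */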

enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};

struct mlxsw_sp_qdisc;

struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

struct mlxsw_sp_qdisc {
	u32 handle;
	u8 tclass_num;
	u8 prio_bitmap;
	union {
		struct red_stats red;
	} xstats_base;
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};

struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;
	struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];

	/* When a PRIO or ETS is added, the invisible FIFOs in their bands are
	 * created first. When notifications for these FIFOs arrive, it is not
	 * known what qdisc their parent handle refers to. It could be a
	 * newly-created PRIO that will replace the currently-offloaded one, or
	 * it could be e.g. a RED that will be attached below it.
	 *
	 * As the notifications start to arrive, use them to note what the
	 * future parent handle is, and keep track of which child FIFOs were
	 * seen. Then when the parent is known, retroactively offload those
	 * FIFOs.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
};
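
/* An illustrative sequence (not taken verbatim from a real trace):
 *
 * # tc qdisc replace dev swp1 root handle 1: prio
 *
 * first generates TC_FIFO_REPLACE notifications for the invisible band
 * FIFOs (parent 1:1, 1:2 and 1:3, handle TC_H_UNSPEC), and only
 * afterwards the TC_PRIO_REPLACE notification for 1: itself.
 * future_handle and future_fifos[] bridge that gap; see
 * mlxsw_sp_setup_tc_fifo() and __mlxsw_sp_qdisc_ets_replace().
 */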

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
{
	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	       mlxsw_sp_qdisc->ops->type == type &&
	       mlxsw_sp_qdisc->handle == handle;
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int tclass, child_index;

	if (parent == TC_H_ROOT)
		return &qdisc_state->root_qdisc;

	if (root_only || !qdisc_state ||
	    !qdisc_state->root_qdisc.ops ||
	    TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
		return NULL;

	child_index = TC_H_MIN(parent);
	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
	return &qdisc_state->tclass_qdiscs[tclass];
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int i;

	if (qdisc_state->root_qdisc.handle == handle)
		return &qdisc_state->root_qdisc;

	if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
		return NULL;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (qdisc_state->tclass_qdiscs[i].handle == handle)
			return &qdisc_state->tclass_qdiscs[i];

	return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;

	return err_hdroom ?: err;
}

static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* If this location held a qdisc of the same type, its
		 * configuration could simply be overridden below. Since the
		 * types differ, the old qdisc has to be destroyed before the
		 * new one is set up.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	/* Check if the Qdisc changed. That includes a situation where an
	 * invisible Qdisc replaces another one, or is being added for the
	 * first time.
	 */
	if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
						      mlxsw_sp_qdisc,
						      stats_ptr);

	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
						       mlxsw_sp_qdisc,
						       xstats_ptr);

	return -EOPNOTSUPP;
}

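/* Per-tclass HW counters are kept both for the unicast queue (index
 * tclass_num) and, in the upper half of the arrays, for its multicast
 * counterpart (index tclass_num + 8 being the MC counterpart of UC
 * tclass_num), so the helpers below sum the two.
 */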
static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
}

static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,
				       u64 *tx_bytes)
{
	int i;

	*tx_packets = 0;
	*tx_bytes = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
		}
	}
}
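
/* E.g. a child qdisc whose prio_bitmap is 0x03 (an illustrative value)
 * accumulates only the counters of priorities 0 and 1.
 */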

static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}

static void
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
}

static void
mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog);
	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
}

static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
						  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	if (p->min > p->max) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min %u is bigger than max %u\n", p->min,
			p->max);
		return -EINVAL;
	}
	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
					GUARANTEED_SHARED_BUFFER)) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: max value %u is too big\n", p->max);
		return -EINVAL;
	}
	if (p->min == 0 || p->max == 0) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min and max must be non-zero\n");
		return -EINVAL;
	}
	return 0;
}

static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
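	/* Worked example with illustrative numbers: p->probability is a
	 * fixed-point fraction of 1 with a 2^32 scale. For a requested drop
	 * probability of 25%, the stack passes 0.25 * 2^32 == 1073741824;
	 * multiplied by 100 and divided by 1 << 16 twice (i.e. by 2^32),
	 * that yields exactly 25, i.e. a 25% drop probability.
	 */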
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}

static void
mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_red_qopt_offload_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	return 0;
}

static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	u64 overlimits;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;

	return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u64 backlog_cells = 0;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog_cells);

	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
	mlxsw_sp_qdisc->stats_base.drops = drops;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     MLXSW_REG_QEEC_MAS_DIS, 0);
}

static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* TBF burst size is configured in bytes. The ASIC burst size value is
	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
	 */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Demand a power of two. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}

static u32
mlxsw_sp_qdisc_tbf_max_size(u8 bs)
{
	return (1U << bs) * 64;
}
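
/* Worked example with illustrative numbers: a TBF burst of 2048 bytes is
 * 2048 / 64 == 32 512-bit units; fls(32) - 1 == 5 and 1 << 5 == 32, so
 * bs == 5 is accepted, and mlxsw_sp_qdisc_tbf_max_size(5) maps back to
 * 2048 bytes. A burst of 1000 bytes truncates to 15 units, which is not
 * a power of two, and is rejected with -EINVAL.
 */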

static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
{
	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
	 * Kbits/s.
	 */
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
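
/* E.g. rate_bytes_ps == 1250000 (10 Mbit/s) gives 1250 * 8 == 10000
 * Kbit/s. Since the division happens before the multiplication, sub-Kbps
 * remainders are truncated.
 */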

static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
			"spectrum: TBF: rate of %lluKbps must be below %u\n",
			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
		return -EINVAL;
	}

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (err) {
		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;

		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
			p->max_size,
			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	/* Configure a subgroup shaper, so that both UC and MC traffic are
	 * subject to shaping. This differs from RED: UC queue lengths diverge
	 * from MC ones due to different pool and quota configurations, so a
	 * configuration shared across both is not applicable there. For a
	 * shaper, on the other hand, subjecting the overall stream to the
	 * configured rate makes sense. Note that this is also what
	 * ieee_setmaxrate() does.
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     rate_kbps, burst_size);
}

static void
mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
				    stats_ptr);
	return 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};

int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_tbf_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_tbf,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_TBF))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_TBF_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_TBF_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;
	return 0;
}

static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	return 0;
}

static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	return 0;
}

static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
				    stats_ptr);
	return 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.destroy = mlxsw_sp_qdisc_fifo_destroy,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};

int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	int tclass, child_index;
	u32 parent_handle;

	/* Invisible FIFOs are tracked in future_handle and future_fifos. Make
	 * sure that not more than one qdisc is created for a port at a time.
	 * RTNL is a simple proxy for that.
	 */
	ASSERT_RTNL();

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notification is for a different Qdisc than
			 * the previous ones. Wipe the future cache.
			 */
			memset(qdisc_state->future_fifos, 0,
			       sizeof(qdisc_state->future_fifos));
			qdisc_state->future_handle = parent_handle;
		}

		child_index = TC_H_MIN(p->parent);
		tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
		if (tclass < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[tclass] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[tclass] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_FIFO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		if (p->handle == mlxsw_sp_qdisc->handle)
			return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
						      mlxsw_sp_qdisc);
		return 0;
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}

static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      i, 0, false, 0);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &qdisc_state->tclass_qdiscs[i]);
		qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static int
__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
{
	if (nbands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}

static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			     unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < nbands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}

		if (handle == qdisc_state->future_handle &&
		    qdisc_state->future_fifos[tclass]) {
			err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
						     child_qdisc,
						     &mlxsw_sp_qdisc_ops_fifo,
						     NULL);
			if (err)
				return err;
		}
	}
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      tclass, 0, false, 0);
	}

	qdisc_state->future_handle = TC_H_UNSPEC;
	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
	return 0;
}

static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
					    zeroes, zeroes, p->priomap);
}

static void
__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		tc_qdisc = &qdisc_state->tclass_qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
		stats_base->drops += xstats->wred_drop[i];
	}

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}

static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
					    p->quanta, p->weights, p->priomap);
}

static void
mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
 * graph is free of cycles). These operations do not change the parent handle
 * though, which means it can be incomplete (if there is more than one class
 * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
 * linked to a different class and then removed from the original class).
 *
 * E.g. consider this sequence of operations:
 *
 *  # tc qdisc add dev swp1 root handle 1: prio
 *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
 *  RED: set bandwidth to 10Mbit
 *  # tc qdisc link dev swp1 handle 13: parent 1:2
 *
 * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
 * child. But RED will still only claim that 1:3 is its parent. If it's removed
 * from that band, its only parent will be 1:2, but it will continue to claim
 * that it is in fact 1:3.
 *
 * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
 * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
 * notification to offload the child Qdisc, based on its parent handle, and use
 * the graft operation to validate that the class where the child is actually
 * grafted corresponds to the parent handle. If the two don't match, we
 * unoffload the child.
 */
static int
__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   u8 band, u32 child_handle)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
	struct mlxsw_sp_qdisc *old_qdisc;

	if (band < IEEE_8021QAZ_MAX_TCS &&
	    qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
			       &qdisc_state->tclass_qdiscs[tclass_num]);
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					  p->band, p->child_handle);
}

int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_PRIO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						 &p->graft_params);
	default:
		return -EOPNOTSUPP;
	}
}

int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_ETS))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						  p->graft_params.band,
						  p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_qevent_block {
	struct list_head binding_list;
	struct list_head mall_entry_list;
	struct mlxsw_sp *mlxsw_sp;
};

struct mlxsw_sp_qevent_binding {
	struct list_head list;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;
	int tclass_num;
	enum mlxsw_sp_span_trigger span_trigger;
};

static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
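
/* A qevent binds a tc block to a queue event of an offloaded Qdisc; the
 * matchall rule in that block then decides what to do with, e.g., packets
 * early-dropped by RED. An illustrative usage sketch (command form
 * assumed from the qevent interface, not taken from this file):
 *
 *  # tc qdisc add dev swp1 root handle 1: red limit 500K avpkt 1K \
 *	probability 0.1 min 50K max 100K qevent early_drop block 10
 *  # tc filter add block 10 matchall skip_sw \
 *	action mirred egress mirror dev swp2
 */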
1323
mlxsw_sp_qevent_span_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding,const struct mlxsw_sp_span_agent_parms * agent_parms,int * p_span_id)1324 static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
1325 struct mlxsw_sp_mall_entry *mall_entry,
1326 struct mlxsw_sp_qevent_binding *qevent_binding,
1327 const struct mlxsw_sp_span_agent_parms *agent_parms,
1328 int *p_span_id)
1329 {
1330 struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1331 struct mlxsw_sp_span_trigger_parms trigger_parms = {};
1332 int span_id;
1333 int err;
1334
1335 err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
1336 if (err)
1337 return err;
1338
1339 err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
1340 if (err)
1341 goto err_analyzed_port_get;
1342
1343 trigger_parms.span_id = span_id;
1344 err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1345 &trigger_parms);
1346 if (err)
1347 goto err_agent_bind;
1348
1349 err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
1350 qevent_binding->tclass_num);
1351 if (err)
1352 goto err_trigger_enable;
1353
1354 *p_span_id = span_id;
1355 return 0;
1356
1357 err_trigger_enable:
1358 mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1359 &trigger_parms);
1360 err_agent_bind:
1361 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
1362 err_analyzed_port_get:
1363 mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1364 return err;
1365 }
1366
mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_qevent_binding * qevent_binding,int span_id)1367 static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
1368 struct mlxsw_sp_qevent_binding *qevent_binding,
1369 int span_id)
1370 {
1371 struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1372 struct mlxsw_sp_span_trigger_parms trigger_parms = {
1373 .span_id = span_id,
1374 };
1375
1376 mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
1377 qevent_binding->tclass_num);
1378 mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1379 &trigger_parms);
1380 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
1381 mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1382 }
1383
mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1384 static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
1385 struct mlxsw_sp_mall_entry *mall_entry,
1386 struct mlxsw_sp_qevent_binding *qevent_binding)
1387 {
1388 struct mlxsw_sp_span_agent_parms agent_parms = {
1389 .to_dev = mall_entry->mirror.to_dev,
1390 };
1391
1392 return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1393 &agent_parms, &mall_entry->mirror.span_id);
1394 }
1395
mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1396 static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
1397 struct mlxsw_sp_mall_entry *mall_entry,
1398 struct mlxsw_sp_qevent_binding *qevent_binding)
1399 {
1400 mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
1401 }
1402
mlxsw_sp_qevent_trap_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1403 static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
1404 struct mlxsw_sp_mall_entry *mall_entry,
1405 struct mlxsw_sp_qevent_binding *qevent_binding)
1406 {
1407 struct mlxsw_sp_span_agent_parms agent_parms = {};
1408 int err;
1409
1410 err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
1411 DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
1412 &agent_parms.policer_enable,
1413 &agent_parms.policer_id);
1414 if (err)
1415 return err;
1416
1417 return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1418 &agent_parms, &mall_entry->trap.span_id);
1419 }
1420
mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1421 static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
1422 struct mlxsw_sp_mall_entry *mall_entry,
1423 struct mlxsw_sp_qevent_binding *qevent_binding)
1424 {
1425 mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
1426 }
1427
mlxsw_sp_qevent_entry_configure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1428 static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
1429 struct mlxsw_sp_mall_entry *mall_entry,
1430 struct mlxsw_sp_qevent_binding *qevent_binding)
1431 {
1432 switch (mall_entry->type) {
1433 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1434 return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
1435 case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1436 return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
1437 default:
1438 /* This should have been validated away. */
1439 WARN_ON(1);
1440 return -EOPNOTSUPP;
1441 }
1442 }
1443
mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_mall_entry * mall_entry,struct mlxsw_sp_qevent_binding * qevent_binding)1444 static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1445 struct mlxsw_sp_mall_entry *mall_entry,
1446 struct mlxsw_sp_qevent_binding *qevent_binding)
1447 {
1448 switch (mall_entry->type) {
1449 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1450 return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1451 case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1452 return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1453 default:
1454 WARN_ON(1);
1455 return;
1456 }
1457 }
1458
mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block * qevent_block,struct mlxsw_sp_qevent_binding * qevent_binding)1459 static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
1460 struct mlxsw_sp_qevent_binding *qevent_binding)
1461 {
1462 struct mlxsw_sp_mall_entry *mall_entry;
1463 int err;
1464
1465 list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
1466 err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
1467 qevent_binding);
1468 if (err)
1469 goto err_entry_configure;
1470 }
1471
1472 return 0;
1473
1474 err_entry_configure:
1475 list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
1476 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1477 qevent_binding);
1478 return err;
1479 }
1480
mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block * qevent_block,struct mlxsw_sp_qevent_binding * qevent_binding)1481 static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1482 struct mlxsw_sp_qevent_binding *qevent_binding)
1483 {
1484 struct mlxsw_sp_mall_entry *mall_entry;
1485
1486 list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1487 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1488 qevent_binding);
1489 }
1490
mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block * qevent_block)1491 static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
1492 {
1493 struct mlxsw_sp_qevent_binding *qevent_binding;
1494 int err;
1495
1496 list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
1497 err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
1498 if (err)
1499 goto err_binding_configure;
1500 }
1501
1502 return 0;
1503
1504 err_binding_configure:
1505 list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
1506 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1507 return err;
1508 }
1509
mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block * qevent_block)1510 static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1511 {
1512 struct mlxsw_sp_qevent_binding *qevent_binding;
1513
1514 list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1515 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1516 }
1517
static struct mlxsw_sp_mall_entry *
mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

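/* Offload a matchall rule on a qevent block. Only a narrow shape is
 * accepted: a single rule per block, on chain 0, matching all protocols,
 * with exactly one action (mirred or trap) and HW stats disabled. Anything
 * else is rejected with -EOPNOTSUPP and an extack message.
 */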
static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* It should not currently be possible to replace a matchall rule. So
	 * this must be a new rule.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	act = &f->rule->action.entries[0];
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	err = mlxsw_sp_qevent_block_configure(qevent_block);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}

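/* Remove the matchall rule identified by the tc cookie. The block is
 * deconfigured as a whole first, since the entry is part of every
 * binding's configuration.
 */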
static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
	if (!mall_entry)
		return;

	mlxsw_sp_qevent_block_deconfigure(qevent_block);

	list_del(&mall_entry->list);
	kfree(mall_entry);
}

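/* Dispatch matchall classifier commands arriving on a qevent block. */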
static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

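/* A qevent block is the driver-private state behind a shared flow block: it
 * tracks the offloaded matchall entries and the port bindings that they are
 * replicated to.
 */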
static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
								  struct net *net)
{
	struct mlxsw_sp_qevent_block *qevent_block;

	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
	if (!qevent_block)
		return NULL;

	INIT_LIST_HEAD(&qevent_block->binding_list);
	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
	qevent_block->mlxsw_sp = mlxsw_sp;
	return qevent_block;
}

static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}

static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	mlxsw_sp_qevent_block_destroy(qevent_block);
}

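/* A binding ties a qevent block to one (port, qdisc handle, SPAN trigger)
 * tuple; the traffic class number is resolved from the bound qdisc.
 */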
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->handle = handle;
	binding->tclass_num = tclass_num;
	binding->span_trigger = span_trigger;
	return binding;
}

static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}

static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       u32 handle,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	list_for_each_entry(qevent_binding, &block->binding_list, list)
		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
		    qevent_binding->handle == handle &&
		    qevent_binding->span_trigger == span_trigger)
			return qevent_binding;
	return NULL;
}

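/* Bind a qevent block to a port qdisc. The flow_block_cb is shared between
 * bindings and reference-counted: it is allocated and registered on the
 * first bind and freed only once the last reference is dropped. The qdisc
 * being bound must already be offloaded, because its traffic class number
 * is recorded in the binding.
 */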
static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
					       struct flow_block_offload *f,
					       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);

	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
							qdisc->tclass_num, span_trigger);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

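/* Undo a qevent block binding: deconfigure and free the binding, then drop
 * the block_cb reference, removing the callback once no bindings remain.
 */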
static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);

	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;

	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);

	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

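/* Entry point for qevent flow block requests from tc; dispatches on the
 * BIND / UNBIND command.
 */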
static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct flow_block_offload *f,
					  enum mlxsw_sp_span_trigger span_trigger)
{
	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
					      struct flow_block_offload *f)
{
	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
}

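/* Allocate the per-port qdisc state. The root qdisc initially spans all
 * eight priorities and uses the port's default traffic class; the per-band
 * qdiscs map 1:1 onto hardware traffic classes.
 */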
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc_state *qdisc_state;
	int i;

	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
	if (!qdisc_state)
		return -ENOMEM;

	qdisc_state->root_qdisc.prio_bitmap = 0xff;
	qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qdisc_state->tclass_qdiscs[i].tclass_num = i;

	mlxsw_sp_port->qdisc = qdisc_state;
	return 0;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->qdisc);
}