// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

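/* Look up the matchall entry previously offloaded for the given TC cookie. */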
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

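/* Offload a mirror action on a port: resolve a SPAN agent towards the
 * destination netdev, mark the port as analyzed and bind the agent to the
 * port's ingress or egress mirroring trigger. A probability rate of 1 means
 * every matching packet is mirrored.
 */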
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_agent_parms agent_parms = {};
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;
	int err;

	if (!mall_entry->mirror.to_dev) {
		NL_SET_ERR_MSG(extack, "Could not find requested device");
		return -EINVAL;
	}

	agent_parms.to_dev = mall_entry->mirror.to_dev;
	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
				       &parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
	return err;
}

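/* Tear down a mirror offload in the reverse order of
 * mlxsw_sp_mall_port_mirror_add().
 */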
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}

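/* Enable or disable per-port packet sampling at the given rate via the MPSC
 * register. Used by the Spectrum-1 sampling implementation.
 */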
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

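/* Offload a sample action on a port: record the sampling parameters for the
 * port's ingress or egress trigger and let the ASIC-specific mall_ops install
 * the actual sampling.
 */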
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;
	int err;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;
	err = mlxsw_sp_sample_trigger_params_set(mlxsw_sp, &trigger,
						 &mall_entry->sample.params,
						 extack);
	if (err)
		return err;

	err = mlxsw_sp->mall_ops->sample_add(mlxsw_sp, mlxsw_sp_port,
					     mall_entry, extack);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
	return err;
}

static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sample_trigger trigger;

	if (mall_entry->ingress)
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_INGRESS;
	else
		trigger.type = MLXSW_SP_SAMPLE_TRIGGER_TYPE_EGRESS;
	trigger.local_port = mlxsw_sp_port->local_port;

	mlxsw_sp->mall_ops->sample_del(mlxsw_sp, mlxsw_sp_port, mall_entry);
	mlxsw_sp_sample_trigger_params_unset(mlxsw_sp, &trigger);
}

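/* Apply a single matchall entry to one port, dispatching on the action type. */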
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry,
			    struct netlink_ext_ack *extack)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry,
						     extack);
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry,
						     extack);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}

static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
		break;
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port, mall_entry);
		break;
	default:
		WARN_ON(1);
	}
}

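/* Recompute the block's minimum and maximum matchall priority; the values are
 * exposed via mlxsw_sp_mall_prio_get().
 */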
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	if (list_empty(&block->mall.list))
		return;
	block->mall.min_prio = UINT_MAX;
	block->mall.max_prio = 0;
	list_for_each_entry(mall_entry, &block->mall.list, list) {
		if (mall_entry->priority < block->mall.min_prio)
			block->mall.min_prio = mall_entry->priority;
		if (mall_entry->priority > block->mall.max_prio)
			block->mall.max_prio = mall_entry->priority;
	}
}

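/* Offload a matchall classifier: validate it (single action, chain 0, 'all'
 * protocol, block not bound to both ingress and egress), make sure its
 * priority does not conflict with already installed flower rules, and then
 * apply it to every port bound to the block.
 */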
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	unsigned int flower_min_prio;
	unsigned int flower_max_prio;
	bool flower_prio_valid;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
				       &flower_min_prio, &flower_max_prio);
	if (err) {
		if (err != -ENOENT) {
			NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
			return err;
		}
		flower_prio_valid = false;
		/* No flower filters are installed in specified chain. */
	} else {
		flower_prio_valid = true;
	}

	if (protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "matchall rules only supported with 'all' protocol");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->priority = f->common.prio;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	if (flower_prio_valid && mall_entry->ingress &&
	    mall_entry->priority >= flower_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
		err = -EOPNOTSUPP;
		goto errout;
	}
	if (flower_prio_valid && !mall_entry->ingress &&
	    mall_entry->priority <= flower_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
		err = -EOPNOTSUPP;
		goto errout;
	}

	act = &f->rule->action.entries[0];

	switch (act->id) {
	case FLOW_ACTION_MIRRED:
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
		break;
	case FLOW_ACTION_SAMPLE:
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.params.psample_group = act->sample.psample_group;
		mall_entry->sample.params.truncate = act->sample.truncate;
		mall_entry->sample.params.trunc_size = act->sample.trunc_size;
		mall_entry->sample.params.rate = act->sample.rate;
		break;
	default:
		err = -EOPNOTSUPP;
		goto errout;
	}

	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry, f->common.extack);
		if (err)
			goto rollback;
	}

	block->rule_count++;
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall.list);
	mlxsw_sp_mall_prio_update(block);
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}

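/* Remove a previously offloaded matchall classifier from every port bound to
 * the block.
 */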
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	list_del(&mall_entry->list);
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
	mlxsw_sp_mall_prio_update(block);
}

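/* Replay all of the block's matchall entries on a port that is being bound to
 * the block.
 */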
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &block->mall.list, list) {
		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry,
						  extack);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
					     list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
	return err;
}

void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall.list, list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}

int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
			   unsigned int *p_min_prio, unsigned int *p_max_prio)
{
	if (chain_index || list_empty(&block->mall.list))
		/* In case there are no matchall rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return -ENOENT;
	*p_min_prio = block->mall.min_prio;
	*p_max_prio = block->mall.max_prio;
	return 0;
}

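/* Spectrum-1: packet sampling is implemented using the MPSC register, so it
 * is only supported on ingress and is limited to the register's maximum rate.
 */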
static int mlxsw_sp1_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	u32 rate = mall_entry->sample.params.rate;

	if (!mall_entry->ingress) {
		NL_SET_ERR_MSG(extack, "Sampling is not supported on egress");
		return -EOPNOTSUPP;
	}

	if (rate > MLXSW_REG_MPSC_RATE_MAX) {
		NL_SET_ERR_MSG(extack, "Unsupported sampling rate");
		return -EOPNOTSUPP;
	}

	return mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true, rate);
}

static void mlxsw_sp1_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
}

const struct mlxsw_sp_mall_ops mlxsw_sp1_mall_ops = {
	.sample_add = mlxsw_sp1_mall_sample_add,
	.sample_del = mlxsw_sp1_mall_sample_del,
};

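/* Spectrum-2 and later: packet sampling is implemented as a SPAN agent that
 * mirrors to the CPU using the dedicated sampling session, with the sampling
 * rate applied as the trigger's probability rate.
 */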
static int mlxsw_sp2_mall_sample_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_mall_entry *mall_entry,
				     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = NULL,	/* Mirror to CPU. */
		.session_id = MLXSW_SP_SPAN_SESSION_ID_SAMPLING,
	};
	u32 rate = mall_entry->sample.params.rate;
	enum mlxsw_sp_span_trigger span_trigger;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->sample.span_id,
				      &agent_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get SPAN agent");
		return err;
	}

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to get analyzed port");
		goto err_analyzed_port_get;
	}

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	trigger_parms.probability_rate = rate;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to bind SPAN agent");
		goto err_agent_bind;
	}

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
	return err;
}

static void mlxsw_sp2_mall_sample_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	enum mlxsw_sp_span_trigger span_trigger;

	span_trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					     MLXSW_SP_SPAN_TRIGGER_EGRESS;
	trigger_parms.span_id = mall_entry->sample.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->sample.span_id);
}

const struct mlxsw_sp_mall_ops mlxsw_sp2_mall_ops = {
	.sample_add = mlxsw_sp2_mall_sample_add,
	.sample_del = mlxsw_sp2_mall_sample_del,
};