/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai);

enum {
        MLX5E_FULLMATCH = 0,
        MLX5E_ALLMULTI  = 1,
        MLX5E_PROMISC   = 2,
};

enum {
        MLX5E_UC       = 0,
        MLX5E_MC_IPV4  = 1,
        MLX5E_MC_IPV6  = 2,
        MLX5E_MC_OTHER = 3,
};

enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD  = 1,
        MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
        struct hlist_node hlist;
        u8 action;
        struct mlx5e_l2_rule ai;
};

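/* Hash an Ethernet address into the 256-bucket netdev_uc/netdev_mc tables
 * by its least significant byte.
 */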
static inline int mlx5e_hash_l2(u8 *addr)
{
        return addr[5];
}

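/* Queue @addr for addition to the device unless it is already tracked, in
 * which case only clear any pending action.  Runs under
 * netif_addr_lock_bh() (see mlx5e_sync_netdev_addr()), hence GFP_ATOMIC.
 */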
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
        struct mlx5e_l2_hash_node *hn;
        int ix = mlx5e_hash_l2(addr);
        int found = 0;

        hlist_for_each_entry(hn, &hash[ix], hlist)
                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
                        found = 1;
                        break;
                }

        if (found) {
                hn->action = MLX5E_ACTION_NONE;
                return;
        }

        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
        if (!hn)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
        hlist_del(&hn->hlist);
        kfree(hn);
}

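/* Push the set of active VLAN IDs into the NIC vport context, truncating
 * to the log_max_vlan_list capability (with a warning) if the netdev has
 * more VLANs than the vport can filter.
 */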
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        int max_list_size;
        int list_size;
        u16 *vlans;
        int vlan;
        int err;
        int i;

        list_size = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                netdev_warn(ndev,
                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                            list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return -ENOMEM;

        i = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
                           err);

        kfree(vlans);
        return err;
}

enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

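/* Program one steering rule in the VLAN flow table, forwarding matching
 * traffic to the L2 table: untagged frames (vlan_tag in criteria only),
 * any tagged frame, or an exact first_vid match for MATCH_VID.
 */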
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, struct mlx5_flow_spec *spec)
{
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_rule **rule_p;
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.l2.ft.t;

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                rule_p = &priv->fs.vlan.untagged_rule;
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                rule_p = &priv->fs.vlan.any_vlan_rule;
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
                rule_p = &priv->fs.vlan.active_vlans_rule[vid];
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        }

        *rule_p = mlx5_add_flow_rule(ft, spec,
                                     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                     MLX5_FS_DEFAULT_FLOW_TAG,
                                     &dest);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return -ENOMEM;
        }

        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
                mlx5e_vport_context_update_vlans(priv);

        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

        kvfree(spec);

        return err;
}

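/* Remove a VLAN steering rule; for MATCH_VID the vport context VLAN list
 * is refreshed as well so the eswitch view stays in sync.
 */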
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->fs.vlan.untagged_rule) {
                        mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                if (priv->fs.vlan.any_vlan_rule) {
                        mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
                        priv->fs.vlan.any_vlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                mlx5e_vport_context_update_vlans(priv);
                if (priv->fs.vlan.active_vlans_rule[vid]) {
                        mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
                        priv->fs.vlan.active_vlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}

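/* Disabling VLAN filtering is emulated with an "any VID" catch-all rule.
 * While IFF_PROMISC is set that rule is managed by mlx5e_set_rx_mode_work()
 * instead, so these helpers leave it alone in promiscuous mode.
 */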
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
        if (priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        set_bit(vid, priv->fs.vlan.active_vlans);

        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        clear_bit(vid, priv->fs.vlan.active_vlans);

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

        return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
        }

        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
        }

        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

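/* Realize a pending hash-node action: install the steering rule for a
 * newly added address, or tear down the rule and free the node for a
 * deleted one.
 */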
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
                                    struct mlx5e_l2_hash_node *hn)
{
        switch (hn->action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                mlx5e_del_l2_flow_rule(priv, &hn->ai);
                mlx5e_del_l2_from_hash(hn);
                break;
        }
}

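/* Snapshot the netdev UC/MC address lists (plus our own dev_addr) into
 * the driver hash tables under the netdev address lock.
 */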
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(netdev);

        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
                             priv->netdev->dev_addr);

        netdev_for_each_uc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

        netdev_for_each_mc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

        netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
                                  u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
        struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
        else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}

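/* Mirror the tracked UC or MC address list into the NIC vport context,
 * truncating to the relevant log_max_current_{uc,mc}_list capability.
 */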
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                netdev_warn(priv->netdev,
                            "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                            is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                netdev_err(priv->netdev,
                           "Failed to modify vport %s list err(%d)\n",
                           is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *ea = &priv->fs.l2;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                mlx5e_execute_l2_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                mlx5e_execute_l2_action(priv, hn);
}

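/* Mark-and-sweep reconciliation: mark every tracked address for deletion,
 * re-sync from the netdev (which flips still-present entries back to
 * NONE/ADD), then apply the resulting actions.  When the interface is
 * being destroyed the sync step is skipped so everything is removed.
 */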
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;

        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);

        mlx5e_apply_netdev_addr(priv);
}

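/* Deferred rx-mode handler: diff the desired promisc/allmulti/broadcast
 * state against what is currently programmed, add or remove only the
 * rules that changed, then push the result to the vport context.
 */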
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);

        struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;

        bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
        bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
        bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
        bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
        bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
        bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

        if (enable_promisc) {
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_netdev_addr(priv);

        if (disable_broadcast)
                mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }

        ea->promisc_enabled = promisc_enabled;
        ea->allmulti_enabled = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
        ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
                        mlx5_del_flow_rule(ttc->rules[i]);
                        ttc->rules[i] = NULL;
                }
        }
}

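/* Per traffic-type matchers for the TTC table: ethertype selects IPv4 vs
 * IPv6 and ip_protocol selects the L4/IPsec protocol.  A zero field is
 * left out of the match, so MLX5E_TT_ANY matches everything.
 */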
static struct {
        u16 etype;
        u8 proto;
} ttc_rules[] = {
        [MLX5E_TT_IPV4_TCP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV6_TCP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV4_UDP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV6_UDP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV4_IPSEC_AH] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV6_IPSEC_AH] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV4_IPSEC_ESP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV6_IPSEC_ESP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV4] = {
                .etype = ETH_P_IP,
                .proto = 0,
        },
        [MLX5E_TT_IPV6] = {
                .etype = ETH_P_IPV6,
                .proto = 0,
        },
        [MLX5E_TT_ANY] = {
                .etype = 0,
                .proto = 0,
        },
};

static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                                                      struct mlx5_flow_table *ft,
                                                      struct mlx5_flow_destination *dest,
                                                      u16 etype,
                                                      u8 proto)
{
        struct mlx5_flow_rule *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
        }
        if (etype) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
        }

        rule = mlx5_add_flow_rule(ft, spec,
                                  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                  MLX5_FS_DEFAULT_FLOW_TAG,
                                  dest);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest;
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_rule **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

        ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->indir_tir[tt].tirn;
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}

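/* TTC table layout: group 1 holds the eight L4/IPsec rules (ethertype +
 * ip_protocol match), group 2 the two L3-only rules (ethertype match),
 * and group 3 the single catch-all MLX5E_TT_ANY rule.
 */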
#define MLX5E_TTC_NUM_GROUPS  3
#define MLX5E_TTC_GROUP1_SIZE BIT(3)
#define MLX5E_TTC_GROUP2_SIZE BIT(1)
#define MLX5E_TTC_GROUP3_SIZE BIT(0)
#define MLX5E_TTC_TABLE_SIZE  (MLX5E_TTC_GROUP1_SIZE +\
                               MLX5E_TTC_GROUP2_SIZE +\
                               MLX5E_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_ttc_table_groups(ttc);
        if (err)
                goto err;

        err = mlx5e_generate_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;
err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
{
        if (!IS_ERR_OR_NULL(ai->rule)) {
                mlx5_del_flow_rule(ai->rule);
                ai->rule = NULL;
        }
}

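/* Install an L2 steering rule forwarding to the TTC table: FULLMATCH
 * matches the whole DMAC, ALLMULTI matches only the multicast (group) bit
 * of the DMAC, and PROMISC matches every frame (empty criteria).
 */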
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
{
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *mc_dmac;
        u8 *mv_dmac;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return -ENOMEM;
        }

        mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                               outer_headers.dmac_47_16);
        mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                               outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.ttc.ft.t;

        switch (type) {
        case MLX5E_FULLMATCH:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                eth_broadcast_addr(mc_dmac);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        case MLX5E_PROMISC:
                break;
        }

        ai->rule = mlx5_add_flow_rule(ft, spec,
                                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
        if (IS_ERR(ai->rule)) {
                netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                           __func__, mv_dmac);
                err = PTR_ERR(ai->rule);
                ai->rule = NULL;
        }

        kvfree(spec);

        return err;
}

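/* L2 table layout: one promiscuous catch-all entry, BIT(15) full-DMAC
 * entries for unicast/broadcast addresses, and one allmulti entry.
 */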
#define MLX5E_NUM_L2_GROUPS  3
#define MLX5E_L2_GROUP1_SIZE BIT(0)
#define MLX5E_L2_GROUP2_SIZE BIT(15)
#define MLX5E_L2_GROUP3_SIZE BIT(0)
#define MLX5E_L2_TABLE_SIZE  (MLX5E_L2_GROUP1_SIZE +\
                              MLX5E_L2_GROUP2_SIZE +\
                              MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int ix = 0;
        u8 *mc_dmac;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                               outer_headers.dmac_47_16);
        /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for full match */
        eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for allmulti */
        eth_zero_addr(mc_dmac);
        mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        kvfree(in);
        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);

        return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *l2_table = &priv->fs.l2;
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
                goto err_destroy_flow_table;

        return 0;

err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

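/* VLAN table layout: BIT(12) = 4096 per-VID rules (one for every possible
 * VLAN ID), plus a two-entry group for the untagged and any-VID rules.
 */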
#define MLX5E_NUM_VLAN_GROUPS  2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE  (MLX5E_VLAN_GROUP0_SIZE +\
                                MLX5E_VLAN_GROUP1_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
{
        int err;
        int ix = 0;
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_table;
        }

        err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;

        mlx5e_add_vlan_rules(priv);

        return 0;

err_free_g:
        kfree(ft->g);
err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

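/* Build the RX steering pipeline.  Tables are created in reverse packet
 * order (arfs, TTC, L2, VLAN) so that each table's rules can forward to
 * an already-existing destination; traffic then flows VLAN -> L2 -> TTC.
 * A failure to create the optional arfs tables only clears NETIF_F_NTUPLE.
 */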
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EOPNOTSUPP;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        err = mlx5e_create_l2_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
                           err);
                goto err_destroy_ttc_table;
        }

        err = mlx5e_create_vlan_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
                           err);
                goto err_destroy_l2_table;
        }

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
        mlx5e_destroy_ttc_table(priv);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
}