/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
46
47 enum {
48 MLX5E_FULLMATCH = 0,
49 MLX5E_ALLMULTI = 1,
50 MLX5E_PROMISC = 2,
51 };
52
53 enum {
54 MLX5E_UC = 0,
55 MLX5E_MC_IPV4 = 1,
56 MLX5E_MC_IPV6 = 2,
57 MLX5E_MC_OTHER = 3,
58 };
59
60 enum {
61 MLX5E_ACTION_NONE = 0,
62 MLX5E_ACTION_ADD = 1,
63 MLX5E_ACTION_DEL = 2,
64 };
65
66 struct mlx5e_l2_hash_node {
67 struct hlist_node hlist;
68 u8 action;
69 struct mlx5e_l2_rule ai;
70 bool mpfs;
71 };
72
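/* Hash L2 addresses into MLX5E_L2_ADDR_HASH_SIZE buckets using the last
 * octet of the MAC address.
 */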
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

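/* Add a MAC address to the L2 hash table, or clear the pending action on
 * an existing entry. New entries are marked MLX5E_ACTION_ADD so that the
 * next mlx5e_apply_netdev_addr() pass installs their steering rules.
 */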
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

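/* Mirror the set of active C-VLANs into the NIC vport context so the
 * device knows which VLANs this vport accepts. The list is capped by the
 * log_max_vlan_list device capability; excess VLANs are dropped with a
 * warning.
 */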
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

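/* Install a single rule in the VLAN flow table. All VLAN rules share the
 * same action (forward to the L2 flow table) and differ only in how they
 * match the C-tag/S-tag presence bits and the first VLAN id.
 */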
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan.active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
			priv->fs.vlan.active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (priv->fs.vlan.active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan.active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan.active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan.active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return err;
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(priv, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(priv, vid);

	return -EOPNOTSUPP;
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, priv->fs.vlan.active_cvlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(dev);
	}

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled)
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (priv->fs.vlan.cvlan_filter_disabled)
		mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

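/* Apply the pending action on one L2 hash node: install or remove its
 * steering rule, and for unicast addresses keep the device MPFS (multi
 * physical function switch) MAC table in sync.
 */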
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}

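/* Snapshot the netdev UC/MC address lists into the driver hash tables
 * under the netdev addr_list lock; the steering rules themselves are
 * added later, outside the lock, by mlx5e_apply_netdev_addr().
 */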
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

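/* Mark-and-sweep update of the L2 rules: mark every known address for
 * deletion, re-mark the addresses still present on the netdev (unless the
 * interface is being destroyed), then execute the resulting actions.
 */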
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

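/* Deferred ndo_set_rx_mode handler: reconcile the promisc/allmulti/
 * broadcast rules and the UC/MC address rules with the current netdev
 * flags, then push the new state to the NIC vport context.
 */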
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};

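/* Traffic type classification (TTC): the ethertype/IP-protocol pair each
 * traffic type's steering rule matches on. MLX5E_TT_ANY is the catch-all
 * for everything left over.
 */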
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5E_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5E_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5E_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
	default:
		return false;
	}
}

bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5e_any_tunnel_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

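/* Build one TTC rule. When the device can match on the outer ip_version
 * field the rule uses it; otherwise it falls back to matching the
 * ethertype.
 */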
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **trules;
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_rules[tt].etype,
						     ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = dest;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	trules = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		if (!mlx5e_tunnel_proto_supported(priv->mdev,
						  ttc_tunnel_rules[tt].proto))
			continue;
		trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_tunnel_rules[tt].etype,
						     ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

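/* The TTC table uses three flow groups, ordered from most to least
 * specific match: L4 (IP version/ethertype plus IP protocol), L3 (IP
 * version/ethertype only) and a final catch-all group with no match
 * criteria.
 */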
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							   ttc_rules[tt].etype,
							   ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = dest;
	}

	return 0;

del_rules:
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
				struct ttc_params *ttc_params)
{
	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
	ttc_params->inner_ttc = &priv->fs.inner_ttc;
}

static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}

void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
					struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
					  struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
		       struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
}

struct mlx5_flow_destination
mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);

	return mlx5e_ttc_fwd_dest(priv, type, &dest);
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

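/* L2 table layout: one entry for the promiscuous catch-all rule, 2^15
 * entries for full DMAC matches (UC/MC addresses and broadcast) and one
 * entry for the allmulti rule, which matches only the multicast bit of
 * the DMAC.
 */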
#define MLX5E_NUM_L2_GROUPS 3
#define MLX5E_L2_GROUP1_SIZE BIT(0)
#define MLX5E_L2_GROUP2_SIZE BIT(15)
#define MLX5E_L2_GROUP3_SIZE BIT(0)
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
			     MLX5E_L2_GROUP2_SIZE +\
			     MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

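/* VLAN table layout: 4096 entries each for per-VID C-tag and S-tag
 * matches, two entries matching the cvlan_tag bit alone (the untagged and
 * any-C-tag rules) and one entry matching the svlan_tag bit alone (the
 * any-S-tag rule).
 */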
#define MLX5E_NUM_VLAN_GROUPS 4
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
			       MLX5E_VLAN_GROUP1_SIZE +\
			       MLX5E_VLAN_GROUP2_SIZE +\
			       MLX5E_VLAN_GROUP3_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

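/* Build the NIC RX steering pipeline. Tables are created from the deepest
 * level up (aRFS, inner TTC, TTC, L2, VLAN) so that each table's rules can
 * point at an already existing destination; received packets traverse
 * VLAN -> L2 -> TTC (and inner TTC for tunneled traffic) on their way to
 * the TIRs.
 */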
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}