• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
3 
4 #include <linux/list.h>
5 #include <linux/notifier.h>
6 #include <net/netevent.h>
7 #include <net/switchdev.h>
8 #include "lib/devcom.h"
9 #include "bridge.h"
10 #include "eswitch.h"
11 #include "bridge_priv.h"
12 #define CREATE_TRACE_POINTS
13 #include "diag/bridge_tracepoint.h"
14 
/* The ingress table is carved into three flow groups: VLAN-tagged source-MAC
 * matches in the first quarter of the table, VLAN filter entries in the
 * second quarter, and untagged source-MAC matches in the remaining half.
 */
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)

/* The egress table is split evenly: VLAN-tagged destination-MAC matches in
 * the first half, untagged destination-MAC matches in the second half.
 */
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
	(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)

/* The skip table holds no entries of its own; it only serves as a forward
 * target for ingress VLAN filter rules.
 */
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
34 
/* FDB flow table levels used when creating the bridge tables
 * (see mlx5_esw_bridge_table_create()).
 */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};
40 
/* Per-bridge FDB hash table parameters: entries are struct
 * mlx5_esw_bridge_fdb_entry, keyed by the embedded struct
 * mlx5_esw_bridge_fdb_key (MAC address and VID, see entry->key usage below).
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
47 
/* Values for struct mlx5_esw_bridge->flags. */
enum {
	MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
};
51 
/* State of a single offloaded bridge netdev. Instances are created on demand
 * and reference counted (see mlx5_esw_bridge_get()/mlx5_esw_bridge_put()).
 */
struct mlx5_esw_bridge {
	int ifindex;		/* ifindex of the offloaded bridge netdev */
	int refcnt;		/* plain (non-atomic) refcount; lookup asserts RTNL */
	struct list_head list;	/* entry in br_offloads->bridges */
	struct mlx5_esw_bridge_offloads *br_offloads;

	struct list_head fdb_list;	/* all offloaded FDB entries of this bridge */
	struct rhashtable fdb_ht;	/* FDB entries keyed per fdb_ht_params */

	struct mlx5_flow_table *egress_ft;	/* per-bridge egress FDB table */
	struct mlx5_flow_group *egress_vlan_fg;	/* VLAN-tagged dmac matches */
	struct mlx5_flow_group *egress_mac_fg;	/* untagged dmac matches */
	unsigned long ageing_time;	/* FDB ageing time in jiffies */
	u32 flags;		/* MLX5_ESW_BRIDGE_*_FLAG bits */
};
67 
68 static void
mlx5_esw_bridge_fdb_offload_notify(struct net_device * dev,const unsigned char * addr,u16 vid,unsigned long val)69 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
70 				   unsigned long val)
71 {
72 	struct switchdev_notifier_fdb_info send_info = {};
73 
74 	send_info.addr = addr;
75 	send_info.vid = vid;
76 	send_info.offloaded = true;
77 	call_switchdev_notifiers(val, dev, &send_info.info, NULL);
78 }
79 
80 static void
mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry * entry)81 mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
82 {
83 	if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
84 		mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
85 						   entry->key.vid,
86 						   SWITCHDEV_FDB_DEL_TO_BRIDGE);
87 }
88 
89 static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte,u32 level,struct mlx5_eswitch * esw)90 mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
91 {
92 	struct mlx5_flow_table_attr ft_attr = {};
93 	struct mlx5_core_dev *dev = esw->dev;
94 	struct mlx5_flow_namespace *ns;
95 	struct mlx5_flow_table *fdb;
96 
97 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
98 	if (!ns) {
99 		esw_warn(dev, "Failed to get FDB namespace\n");
100 		return ERR_PTR(-ENOENT);
101 	}
102 
103 	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
104 	ft_attr.max_fte = max_fte;
105 	ft_attr.level = level;
106 	ft_attr.prio = FDB_BR_OFFLOAD;
107 	fdb = mlx5_create_flow_table(ns, &ft_attr);
108 	if (IS_ERR(fdb))
109 		esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
110 
111 	return fdb;
112 }
113 
114 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)115 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
116 {
117 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
118 	struct mlx5_flow_group *fg;
119 	u32 *in, *match;
120 
121 	in = kvzalloc(inlen, GFP_KERNEL);
122 	if (!in)
123 		return ERR_PTR(-ENOMEM);
124 
125 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
126 		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
127 	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
128 
129 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
130 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
131 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
132 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
133 
134 	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
135 		 mlx5_eswitch_get_vport_metadata_mask());
136 
137 	MLX5_SET(create_flow_group_in, in, start_flow_index,
138 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
139 	MLX5_SET(create_flow_group_in, in, end_flow_index,
140 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
141 
142 	fg = mlx5_create_flow_group(ingress_ft, in);
143 	kvfree(in);
144 	if (IS_ERR(fg))
145 		esw_warn(esw->dev,
146 			 "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
147 			 PTR_ERR(fg));
148 
149 	return fg;
150 }
151 
152 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)153 mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
154 					 struct mlx5_flow_table *ingress_ft)
155 {
156 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
157 	struct mlx5_flow_group *fg;
158 	u32 *in, *match;
159 
160 	in = kvzalloc(inlen, GFP_KERNEL);
161 	if (!in)
162 		return ERR_PTR(-ENOMEM);
163 
164 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
165 		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
166 	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
167 
168 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
169 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
170 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
171 
172 	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
173 		 mlx5_eswitch_get_vport_metadata_mask());
174 
175 	MLX5_SET(create_flow_group_in, in, start_flow_index,
176 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
177 	MLX5_SET(create_flow_group_in, in, end_flow_index,
178 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
179 
180 	fg = mlx5_create_flow_group(ingress_ft, in);
181 	if (IS_ERR(fg))
182 		esw_warn(esw->dev,
183 			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
184 			 PTR_ERR(fg));
185 
186 	kvfree(in);
187 	return fg;
188 }
189 
190 static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * ingress_ft)191 mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
192 {
193 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
194 	struct mlx5_flow_group *fg;
195 	u32 *in, *match;
196 
197 	in = kvzalloc(inlen, GFP_KERNEL);
198 	if (!in)
199 		return ERR_PTR(-ENOMEM);
200 
201 	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
202 		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
203 	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
204 
205 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
206 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
207 
208 	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
209 		 mlx5_eswitch_get_vport_metadata_mask());
210 
211 	MLX5_SET(create_flow_group_in, in, start_flow_index,
212 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
213 	MLX5_SET(create_flow_group_in, in, end_flow_index,
214 		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
215 
216 	fg = mlx5_create_flow_group(ingress_ft, in);
217 	if (IS_ERR(fg))
218 		esw_warn(esw->dev,
219 			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
220 			 PTR_ERR(fg));
221 
222 	kvfree(in);
223 	return fg;
224 }
225 
226 static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * egress_ft)227 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
228 {
229 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
230 	struct mlx5_flow_group *fg;
231 	u32 *in, *match;
232 
233 	in = kvzalloc(inlen, GFP_KERNEL);
234 	if (!in)
235 		return ERR_PTR(-ENOMEM);
236 
237 	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
238 	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
239 
240 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
241 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
242 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
243 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
244 
245 	MLX5_SET(create_flow_group_in, in, start_flow_index,
246 		 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
247 	MLX5_SET(create_flow_group_in, in, end_flow_index,
248 		 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
249 
250 	fg = mlx5_create_flow_group(egress_ft, in);
251 	if (IS_ERR(fg))
252 		esw_warn(esw->dev,
253 			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
254 			 PTR_ERR(fg));
255 	kvfree(in);
256 	return fg;
257 }
258 
259 static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch * esw,struct mlx5_flow_table * egress_ft)260 mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
261 {
262 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
263 	struct mlx5_flow_group *fg;
264 	u32 *in, *match;
265 
266 	in = kvzalloc(inlen, GFP_KERNEL);
267 	if (!in)
268 		return ERR_PTR(-ENOMEM);
269 
270 	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
271 	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
272 
273 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
274 	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
275 
276 	MLX5_SET(create_flow_group_in, in, start_flow_index,
277 		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
278 	MLX5_SET(create_flow_group_in, in, end_flow_index,
279 		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
280 
281 	fg = mlx5_create_flow_group(egress_ft, in);
282 	if (IS_ERR(fg))
283 		esw_warn(esw->dev,
284 			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
285 			 PTR_ERR(fg));
286 	kvfree(in);
287 	return fg;
288 }
289 
/* Create the shared ingress infrastructure (ingress table, skip table and
 * the three ingress flow groups) and publish it in br_offloads. Returns 0
 * or a negative errno; on failure nothing is published.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	int err;

	/* Ingress rules match the source vport via metadata in reg_c_0. */
	if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  br_offloads->esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	/* Empty table used as forward target by the ingress VLAN filter rules
	 * (see mlx5_esw_bridge_ingress_filter_flow_create()).
	 */
	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       br_offloads->esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(filter_fg)) {
		err = PTR_ERR(filter_fg);
		goto err_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Publish only after every object was created successfully. */
	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_filter_fg = filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

	/* Unwind in reverse creation order; each label is named after the
	 * failing step and frees everything created before it.
	 */
err_mac_fg:
	mlx5_destroy_flow_group(filter_fg);
err_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
349 
/* Tear down the shared ingress infrastructure. Flow groups must be destroyed
 * before their table. Pointers are cleared so mlx5_esw_bridge_lookup() will
 * re-create the tables on next use.
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
	br_offloads->ingress_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
364 
/* Create the per-bridge egress table and its two flow groups and store them
 * in the bridge. Returns 0 or a negative errno; on failure the bridge is
 * left untouched.
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *mac_fg, *vlan_fg;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 br_offloads->esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* Publish only after all objects were created. */
	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_mac_fg = mac_fg;
	return 0;

	/* Unwind in reverse creation order. */
err_mac_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
402 
/* Destroy the per-bridge egress table; groups go before the table. */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
410 
/* Add an ingress FDB rule that matches packets with the given source MAC
 * coming from vport_num of the given eswitch (via reg_c_0 metadata),
 * optionally handles the VLAN (push a tag or match a VID) and forwards to
 * the bridge egress table while counting hits. Returns the rule handle or
 * an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport metadata of the given (possibly peer) esw. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		/* Untagged traffic on a pvid VLAN: push the VLAN header. */
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
	} else if (vlan) {
		/* Otherwise require an exact match on the tagged VID. */
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* Forward to the bridge egress table and count the hit. */
	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
470 
471 static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num,const unsigned char * addr,struct mlx5_esw_bridge_vlan * vlan,u32 counter_id,struct mlx5_esw_bridge * bridge)472 mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
473 				    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
474 				    struct mlx5_esw_bridge *bridge)
475 {
476 	return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
477 							    bridge, bridge->br_offloads->esw);
478 }
479 
480 static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num,const unsigned char * addr,struct mlx5_esw_bridge_vlan * vlan,u32 counter_id,struct mlx5_esw_bridge * bridge)481 mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
482 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
483 					 struct mlx5_esw_bridge *bridge)
484 {
485 	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
486 	static struct mlx5_flow_handle *handle;
487 	struct mlx5_eswitch *peer_esw;
488 
489 	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
490 	if (!peer_esw)
491 		return ERR_PTR(-ENODEV);
492 
493 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
494 							      bridge, peer_esw);
495 
496 	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
497 	return handle;
498 }
499 
/* Add an ingress rule that catches any VLAN-tagged packet with the given
 * source MAC from vport_num and forwards it to the empty skip table,
 * effectively stopping it from hitting the untagged MAC rules below.
 * Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact match on the full source MAC. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport metadata of the local eswitch. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Match any tagged packet (cvlan_tag set, VID not matched). */
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
			 outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
			 outer_headers.cvlan_tag);

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
545 
/* Add an egress rule in the bridge's egress table that matches the given
 * destination MAC (and VID, when vlan is set), optionally pops the VLAN
 * header and forwards the packet to the destination vport. Returns the rule
 * handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact match on the full destination MAC. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		/* Pop the tag for egress to an untagged member port. */
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		/* Require an exact match on the tagged VID. */
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	/* With merged_eswitch the destination vport may live on the peer
	 * device, so qualify it with the owner vhca id.
	 */
	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
601 
mlx5_esw_bridge_create(int ifindex,struct mlx5_esw_bridge_offloads * br_offloads)602 static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
603 						      struct mlx5_esw_bridge_offloads *br_offloads)
604 {
605 	struct mlx5_esw_bridge *bridge;
606 	int err;
607 
608 	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
609 	if (!bridge)
610 		return ERR_PTR(-ENOMEM);
611 
612 	bridge->br_offloads = br_offloads;
613 	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
614 	if (err)
615 		goto err_egress_tbl;
616 
617 	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
618 	if (err)
619 		goto err_fdb_ht;
620 
621 	INIT_LIST_HEAD(&bridge->fdb_list);
622 	bridge->ifindex = ifindex;
623 	bridge->refcnt = 1;
624 	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
625 	list_add(&bridge->list, &br_offloads->bridges);
626 
627 	return bridge;
628 
629 err_fdb_ht:
630 	mlx5_esw_bridge_egress_table_cleanup(bridge);
631 err_egress_tbl:
632 	kvfree(bridge);
633 	return ERR_PTR(err);
634 }
635 
mlx5_esw_bridge_get(struct mlx5_esw_bridge * bridge)636 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
637 {
638 	bridge->refcnt++;
639 }
640 
mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads * br_offloads,struct mlx5_esw_bridge * bridge)641 static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
642 				struct mlx5_esw_bridge *bridge)
643 {
644 	if (--bridge->refcnt)
645 		return;
646 
647 	mlx5_esw_bridge_egress_table_cleanup(bridge);
648 	list_del(&bridge->list);
649 	rhashtable_destroy(&bridge->fdb_ht);
650 	kvfree(bridge);
651 
652 	if (list_empty(&br_offloads->bridges))
653 		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
654 }
655 
/* Find an existing bridge by ifindex (taking a reference) or create a new
 * one. The shared ingress tables are lazily initialized before the first
 * bridge is created and torn down again if that creation fails. Caller must
 * hold RTNL. Returns the bridge or an ERR_PTR.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* No bridge yet: the shared ingress infrastructure may be absent. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
	/* Don't keep the shared tables around if the first bridge failed. */
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
682 
mlx5_esw_bridge_port_key_from_data(u16 vport_num,u16 esw_owner_vhca_id)683 static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
684 {
685 	return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
686 }
687 
mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port * port)688 static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
689 {
690 	return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
691 }
692 
mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_offloads * br_offloads)693 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
694 				       struct mlx5_esw_bridge_offloads *br_offloads)
695 {
696 	return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
697 }
698 
699 static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_port_lookup(u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge_offloads * br_offloads)700 mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
701 			    struct mlx5_esw_bridge_offloads *br_offloads)
702 {
703 	return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
704 									       esw_owner_vhca_id));
705 }
706 
mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge_offloads * br_offloads)707 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
708 				       struct mlx5_esw_bridge_offloads *br_offloads)
709 {
710 	xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
711 }
712 
mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry * entry)713 static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
714 {
715 	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
716 
717 	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
718 					   entry->key.vid,
719 					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
720 }
721 
/* Fully remove an FDB entry: unhash it, delete its flow rules (egress,
 * optional ingress VLAN filter, ingress), free the ingress counter, unlink
 * it from both the per-VLAN and per-bridge lists and free it.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	/* filter_handle only exists for entries with a VLAN filter rule. */
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
738 
mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge * bridge)739 static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
740 {
741 	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
742 
743 	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
744 		mlx5_esw_bridge_fdb_del_notify(entry);
745 		mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
746 	}
747 }
748 
749 static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid,struct mlx5_esw_bridge_port * port)750 mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
751 {
752 	return xa_load(&port->vlans, vid);
753 }
754 
755 static int
mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)756 mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
757 {
758 	struct {
759 		__be16	h_vlan_proto;
760 		__be16	h_vlan_TCI;
761 	} vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
762 	struct mlx5_pkt_reformat_params reformat_params = {};
763 	struct mlx5_pkt_reformat *pkt_reformat;
764 
765 	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
766 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
767 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
768 	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
769 		esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
770 		return -EOPNOTSUPP;
771 	}
772 
773 	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
774 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
775 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
776 	reformat_params.size = sizeof(vlan_hdr);
777 	reformat_params.data = &vlan_hdr;
778 	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
779 						  &reformat_params,
780 						  MLX5_FLOW_NAMESPACE_FDB);
781 	if (IS_ERR(pkt_reformat)) {
782 		esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
783 			 PTR_ERR(pkt_reformat));
784 		return PTR_ERR(pkt_reformat);
785 	}
786 
787 	vlan->pkt_reformat_push = pkt_reformat;
788 	return 0;
789 }
790 
791 static void
mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)792 mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
793 {
794 	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
795 	vlan->pkt_reformat_push = NULL;
796 }
797 
798 static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)799 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
800 {
801 	struct mlx5_pkt_reformat_params reformat_params = {};
802 	struct mlx5_pkt_reformat *pkt_reformat;
803 
804 	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
805 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
806 	    MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
807 	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
808 		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
809 		return -EOPNOTSUPP;
810 	}
811 
812 	reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
813 	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
814 	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
815 	reformat_params.size = sizeof(struct vlan_hdr);
816 	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
817 						  &reformat_params,
818 						  MLX5_FLOW_NAMESPACE_FDB);
819 	if (IS_ERR(pkt_reformat)) {
820 		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
821 			 PTR_ERR(pkt_reformat));
822 		return PTR_ERR(pkt_reformat);
823 	}
824 
825 	vlan->pkt_reformat_pop = pkt_reformat;
826 	return 0;
827 }
828 
829 static void
mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_eswitch * esw)830 mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
831 {
832 	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
833 	vlan->pkt_reformat_pop = NULL;
834 }
835 
836 static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid,u16 flags,struct mlx5_esw_bridge_port * port,struct mlx5_eswitch * esw)837 mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
838 			    struct mlx5_eswitch *esw)
839 {
840 	struct mlx5_esw_bridge_vlan *vlan;
841 	int err;
842 
843 	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
844 	if (!vlan)
845 		return ERR_PTR(-ENOMEM);
846 
847 	vlan->vid = vid;
848 	vlan->flags = flags;
849 	INIT_LIST_HEAD(&vlan->fdb_list);
850 
851 	if (flags & BRIDGE_VLAN_INFO_PVID) {
852 		err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
853 		if (err)
854 			goto err_vlan_push;
855 	}
856 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
857 		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
858 		if (err)
859 			goto err_vlan_pop;
860 	}
861 
862 	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
863 	if (err)
864 		goto err_xa_insert;
865 
866 	trace_mlx5_esw_bridge_vlan_create(vlan);
867 	return vlan;
868 
869 err_xa_insert:
870 	if (vlan->pkt_reformat_pop)
871 		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
872 err_vlan_pop:
873 	if (vlan->pkt_reformat_push)
874 		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
875 err_vlan_push:
876 	kvfree(vlan);
877 	return ERR_PTR(err);
878 }
879 
/* Remove @vlan from its port's vlan xarray. Does not free the object or
 * tear down its flows; callers pair this with mlx5_esw_bridge_vlan_flush().
 */
static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}
885 
mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan * vlan,struct mlx5_esw_bridge * bridge)886 static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
887 				       struct mlx5_esw_bridge *bridge)
888 {
889 	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
890 
891 	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
892 		mlx5_esw_bridge_fdb_del_notify(entry);
893 		mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
894 	}
895 
896 	if (vlan->pkt_reformat_pop)
897 		mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
898 	if (vlan->pkt_reformat_push)
899 		mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
900 }
901 
/* Fully dispose of @vlan: trace the event, flush its offloaded state,
 * unregister it from @port's vlan xarray and free the memory. The flush
 * must precede the erase so FDB entries can still reach the vlan object.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
911 
mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port * port,struct mlx5_esw_bridge * bridge)912 static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
913 					     struct mlx5_esw_bridge *bridge)
914 {
915 	struct mlx5_esw_bridge_vlan *vlan;
916 	unsigned long index;
917 
918 	xa_for_each(&port->vlans, index, vlan)
919 		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
920 }
921 
922 static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid,u16 vport_num,u16 esw_owner_vhca_id,struct mlx5_esw_bridge * bridge,struct mlx5_eswitch * esw)923 mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
924 				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
925 {
926 	struct mlx5_esw_bridge_port *port;
927 	struct mlx5_esw_bridge_vlan *vlan;
928 
929 	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
930 	if (!port) {
931 		/* FDB is added asynchronously on wq while port might have been deleted
932 		 * concurrently. Report on 'info' logging level and skip the FDB offload.
933 		 */
934 		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
935 		return ERR_PTR(-EINVAL);
936 	}
937 
938 	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
939 	if (!vlan) {
940 		/* FDB is added asynchronously on wq while vlan might have been deleted
941 		 * concurrently. Report on 'info' logging level and skip the FDB offload.
942 		 */
943 		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
944 			 vport_num);
945 		return ERR_PTR(-EINVAL);
946 	}
947 
948 	return vlan;
949 }
950 
/* Create and fully offload one bridge FDB entry for (@addr, @vid) on the
 * given vport: a flow counter, an ingress flow (peer variant when @peer),
 * an optional ingress VLAN filter flow, an egress flow, plus registration
 * in the bridge's rhashtable and lists. Returns the entry or an ERR_PTR;
 * on failure all hardware objects created so far are destroyed in reverse
 * order via the goto ladder.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* Only resolve the vlan when filtering is on and the entry is tagged;
	 * vid 0 entries are not associated with any vlan object.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Aging counter: its lastuse timestamp is polled by
	 * mlx5_esw_bridge_update() to refresh or expire the entry.
	 */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
							 mlx5_fc_id(counter), bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	/* With VLAN filtering on, an extra flow drops traffic that doesn't
	 * match any configured vlan for this address.
	 */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Untagged entries still get an initialized (empty) vlan_list node so
	 * list operations on it stay valid.
	 */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1053 
/* Set the FDB ageing time (given in clock_t units, stored in jiffies) on
 * the bridge that the specified vport is attached to. Returns -EINVAL when
 * the vport is not a bridge port.
 */
int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
				    struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port =
		mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);

	if (!port)
		return -EINVAL;

	port->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
	return 0;
}
1066 
/* Enable or disable VLAN filtering on the bridge the vport belongs to.
 * A change of state flushes all offloaded FDB entries, since their flow
 * layout depends on the filtering mode. No-op when already in the requested
 * state; -EINVAL when the vport is not a bridge port.
 */
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	bridge = port->bridge;
	if (!!(bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) == enable)
		return 0;

	mlx5_esw_bridge_fdb_flush(bridge);
	if (enable)
		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	else
		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;

	return 0;
}
1091 
/* Allocate a bridge port object for the vport, attach it to @bridge and
 * register it in the offloads port xarray. Returns 0 or a negative errno;
 * the port memory is freed on failure.
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	xa_init(&port->vlans);
	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;

	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_insert;
	}

	trace_mlx5_esw_bridge_vport_init(port);
	return 0;

err_insert:
	kvfree(port);
	return err;
}
1124 
mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads * br_offloads,struct mlx5_esw_bridge_port * port)1125 static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
1126 					 struct mlx5_esw_bridge_port *port)
1127 {
1128 	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
1129 	struct mlx5_esw_bridge *bridge = port->bridge;
1130 	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
1131 
1132 	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
1133 		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
1134 			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
1135 
1136 	trace_mlx5_esw_bridge_vport_cleanup(port);
1137 	mlx5_esw_bridge_port_vlans_flush(port, bridge);
1138 	mlx5_esw_bridge_port_erase(port, br_offloads);
1139 	kvfree(port);
1140 	mlx5_esw_bridge_put(br_offloads, bridge);
1141 	return 0;
1142 }
1143 
/* Attach a vport to the bridge identified by @ifindex, creating the bridge
 * object if needed (the lookup takes a reference). On port init failure the
 * bridge reference is dropped again. Errors are reported via @extack.
 */
static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
						 u16 flags,
						 struct mlx5_esw_bridge_offloads *br_offloads,
						 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
	if (!err)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}
1169 
/* Attach a local (non-peer) vport to the bridge with @ifindex; thin wrapper
 * around mlx5_esw_bridge_vport_link_with_flags() with no port flags set.
 */
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
			       struct mlx5_esw_bridge_offloads *br_offloads,
			       struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
						     br_offloads, extack);
}
1177 
/* Detach a vport from the bridge with @ifindex. Fails with -EINVAL when the
 * vport is not attached to any bridge or is attached to a different one;
 * errors are reported via @extack.
 */
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port =
		mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	int err;

	if (!port) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (port->bridge->ifindex != ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}
1200 
/* Attach a peer-eswitch vport to the bridge with @ifindex. Peer offload is
 * only meaningful with the merged_eswitch capability; without it this is a
 * silent no-op (returns 0).
 */
int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				    struct mlx5_esw_bridge_offloads *br_offloads,
				    struct netlink_ext_ack *extack)
{
	if (MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
		return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num,
							     esw_owner_vhca_id,
							     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
							     br_offloads, extack);

	return 0;
}
1212 
/* Detach a peer-eswitch vport; identical to the regular unlink path since
 * peer ports are stored in the same xarray.
 */
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1220 
/* Add (or re-add with different flags) VLAN @vid on a bridge port. An
 * existing vlan with identical flags is kept as-is; one with different
 * flags is torn down and recreated, since the flags determine the packet
 * reformat contexts. Errors are reported via @extack.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_vlan *vlan;
	struct mlx5_esw_bridge_port *port;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}

	return 0;
}
1246 
/* Remove VLAN @vid from a bridge port. Silently ignores unknown ports or
 * vids, matching switchdev del semantics.
 */
void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_vlan *vlan;
	struct mlx5_esw_bridge_port *port;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
1262 
/* Refresh the software lastuse timestamp of the offloaded FDB entry that
 * matches @fdb_info, so the aging logic in mlx5_esw_bridge_update() does
 * not expire an entry the kernel bridge still considers active. Missing
 * port or entry is an expected race and only debug-logged.
 * NOTE(review): the @dev parameter is unused here; kept for interface
 * symmetry with the other fdb notifier handlers.
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_fdb_key key;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	ether_addr_copy(key.addr, fdb_info->addr);
	key.vid = fdb_info->vid;
	entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
	if (entry) {
		entry->lastuse = jiffies;
		return;
	}

	esw_debug(br_offloads->esw->dev,
		  "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
		  key.addr, key.vid, vport_num);
}
1289 
/* Offload a new FDB entry described by @fdb_info and send the appropriate
 * switchdev notification: OFFLOADED for user-added entries, ADD_TO_BRIDGE
 * for learned local entries (so the kernel bridge won't age them out), and
 * nothing for peer entries. Failures are silently dropped — the entry just
 * stays unoffloaded.
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, port->bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) {
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
		return;
	}
	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER)
		return;

	/* Take over dynamic entries to prevent kernel bridge from aging them out. */
	mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1318 
/* Remove the offloaded FDB entry matching @fdb_info: notify the kernel
 * bridge about the deletion, then destroy the hardware flows and the entry
 * itself. A missing entry is warned about (unlike the update path, removal
 * of an unknown entry indicates an inconsistency worth surfacing).
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_fdb_key key;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	ether_addr_copy(key.addr, fdb_info->addr);
	key.vid = fdb_info->vid;
	entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
	if (!entry) {
		esw_warn(esw->dev,
			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			 key.addr, key.vid, vport_num);
		return;
	}

	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
1347 
/* Periodic aging pass over all bridges: for every dynamic FDB entry,
 * compare the hardware counter's lastuse timestamp against the software
 * one — refresh the entry if traffic was seen since, otherwise expire it
 * once it has been idle longer than the bridge's ageing_time. User-added
 * entries are never aged; peer entries are refreshed but expired only by
 * their owning eswitch.
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			/* Jiffies timestamp of the last packet that hit the
			 * entry's ingress counter, as reported by hardware.
			 */
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
				continue;

			if (time_after(lastuse, entry->lastuse)) {
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			} else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				   time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
				mlx5_esw_bridge_fdb_del_notify(entry);
				mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
			}
		}
	}
}
1371 
mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads * br_offloads)1372 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1373 {
1374 	struct mlx5_esw_bridge_port *port;
1375 	unsigned long i;
1376 
1377 	xa_for_each(&br_offloads->ports, i, port)
1378 		mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1379 
1380 	WARN_ONCE(!list_empty(&br_offloads->bridges),
1381 		  "Cleaning up bridge offloads while still having bridges attached\n");
1382 }
1383 
mlx5_esw_bridge_init(struct mlx5_eswitch * esw)1384 struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1385 {
1386 	struct mlx5_esw_bridge_offloads *br_offloads;
1387 
1388 	ASSERT_RTNL();
1389 
1390 	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1391 	if (!br_offloads)
1392 		return ERR_PTR(-ENOMEM);
1393 
1394 	INIT_LIST_HEAD(&br_offloads->bridges);
1395 	xa_init(&br_offloads->ports);
1396 	br_offloads->esw = esw;
1397 	esw->br_offloads = br_offloads;
1398 
1399 	return br_offloads;
1400 }
1401 
mlx5_esw_bridge_cleanup(struct mlx5_eswitch * esw)1402 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1403 {
1404 	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1405 
1406 	ASSERT_RTNL();
1407 
1408 	if (!br_offloads)
1409 		return;
1410 
1411 	mlx5_esw_bridge_flush(br_offloads);
1412 	WARN_ON(!xa_empty(&br_offloads->ports));
1413 
1414 	esw->br_offloads = NULL;
1415 	kvfree(br_offloads);
1416 }
1417