// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

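/* Prio tag is needed only when the device requires it (prio_tag_required
 * HCA capability) and the vport belongs to a VF.
 */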
static bool
esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

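/* Install the prio tag FTE: match untagged packets and push a zero-VID,
 * zero-priority 802.1Q header so traffic carries a prio tag on ingress.
 */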
static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, then allow.
	 * Unmatched traffic is allowed by default.
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

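/* Allocate a modify-header action that stamps the vport's source port
 * metadata into REG_C_0, and install an all-match FTE that applies it.
 */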
static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

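	/* The match value carries the metadata in the high bits of REG_C_0;
	 * shift it down so the set action below can write it back at
	 * ESW_SOURCE_PORT_METADATA_OFFSET.
	 */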
	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.modify_metadata_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}

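/* Create the ingress FTEs applicable to this vport: the metadata rule when
 * metadata matching is enabled, and the prio tag rule when required.
 */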
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_acl_ingress_mod_metadata_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress modify metadata, err(%d)\n",
				 vport->vport, err);
			return err;
		}
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_acl_ingress_prio_tag_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress prio tag rule, err(%d)\n",
				 vport->vport, err);
			goto prio_tag_err;
		}
	}

	return 0;

prio_tag_err:
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	return err;
}

static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	esw_acl_ingress_allow_rule_destroy(vport);
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
}

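/* Create up to two flow groups: one matching on the outer cvlan tag for the
 * prio tag FTE at flow index 0, then a criteria-less group for the metadata
 * FTE.
 */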
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds the FTE that matches untagged packets
		 * when prio_tag is enabled.
		 */
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match that adds metadata:
		 * for tagged packets if prio-tag is enabled, or for all
		 * traffic if prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

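/* Set up the ingress ACL for a vport: size the table by the number of FTEs
 * needed, then create the flow groups and rules. A no-op when neither
 * metadata matching nor prio tag is in use.
 */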
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
		return 0;

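	/* Drop any allow rule left over from an earlier configuration
	 * before rebuilding the ACL.
	 */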
	esw_acl_ingress_allow_rule_destroy(vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
	if (IS_ERR(vport->ingress.acl)) {
		err = PTR_ERR(vport->ingress.acl);
		vport->ingress.acl = NULL;
		return err;
	}

	err = esw_acl_ingress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_ingress_ofld_groups_destroy(vport);
group_err:
	esw_acl_ingress_table_destroy(vport);
	return err;
}

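/* Tear down in reverse order of setup: rules, then groups, then the table. */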
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	esw_acl_ingress_ofld_rules_destroy(esw, vport);
	esw_acl_ingress_ofld_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);
}

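/* Rebuild the ingress metadata rules after a bond change: a zero metadata
 * value restores the vport's default metadata, as does a creation failure.
 */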
/* Caller must hold rtnl_lock */
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
					   u32 metadata)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int err;

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	esw_acl_ingress_ofld_rules_destroy(esw, vport);

	vport->metadata = metadata ? metadata : vport->default_metadata;

	/* Recreate ingress acl rules with vport->metadata */
	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto out;

	return 0;

out:
	vport->metadata = vport->default_metadata;
	return err;
}