1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/errno.h>
37 #include <linux/netdevice.h>
38 #include <net/flow_dissector.h>
39 #include <net/pkt_cls.h>
40 #include <net/tc_act/tc_gact.h>
41 #include <net/tc_act/tc_mirred.h>
42 #include <net/tc_act/tc_vlan.h>
43
44 #include "spectrum.h"
45 #include "core_acl_flex_keys.h"
46
/* Parse the TC actions attached to a flower filter and program them into
 * @rulei. Returns 0 on success or a negative errno if an action is
 * unsupported or cannot be offloaded.
 *
 * Fix: the vlan branch used to "return" directly, which silently dropped
 * any actions listed after the vlan action (e.g. vlan modify followed by
 * drop or redirect). It now checks the error and keeps iterating, like
 * every other branch.
 */
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev, bool ingress,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			/* Jump target is the ACL group backing the
			 * destination chain's ruleset.
			 */
			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
							      ingress,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
		} else if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			/* Forwarding needs a FID set first; the dummy FID is
			 * used since redirect bypasses bridge FID lookup.
			 */
			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index);
			if (err)
				return err;

			out_dev = __dev_get_by_index(dev_net(dev), ifindex);
			/* Redirecting back to the ingress port is signalled
			 * to the fwd helper with a NULL device.
			 */
			if (out_dev == dev)
				out_dev = NULL;

			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			/* Do not return early here: further actions in the
			 * list must still be parsed.
			 */
			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  action, vid,
							  proto, prio);
			if (err)
				return err;
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
125
mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info * rulei,struct tc_cls_flower_offload * f)126 static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
127 struct tc_cls_flower_offload *f)
128 {
129 struct flow_dissector_key_ipv4_addrs *key =
130 skb_flow_dissector_target(f->dissector,
131 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
132 f->key);
133 struct flow_dissector_key_ipv4_addrs *mask =
134 skb_flow_dissector_target(f->dissector,
135 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
136 f->mask);
137
138 mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
139 ntohl(key->src), ntohl(mask->src));
140 mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
141 ntohl(key->dst), ntohl(mask->dst));
142 }
143
mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info * rulei,struct tc_cls_flower_offload * f)144 static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
145 struct tc_cls_flower_offload *f)
146 {
147 struct flow_dissector_key_ipv6_addrs *key =
148 skb_flow_dissector_target(f->dissector,
149 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
150 f->key);
151 struct flow_dissector_key_ipv6_addrs *mask =
152 skb_flow_dissector_target(f->dissector,
153 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
154 f->mask);
155 size_t addr_half_size = sizeof(key->src) / 2;
156
157 mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
158 &key->src.s6_addr[0],
159 &mask->src.s6_addr[0],
160 addr_half_size);
161 mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
162 &key->src.s6_addr[addr_half_size],
163 &mask->src.s6_addr[addr_half_size],
164 addr_half_size);
165 mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
166 &key->dst.s6_addr[0],
167 &mask->dst.s6_addr[0],
168 addr_half_size);
169 mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
170 &key->dst.s6_addr[addr_half_size],
171 &mask->dst.s6_addr[addr_half_size],
172 addr_half_size);
173 }
174
/* Parse an optional L4-port match. A no-op when the filter carries no
 * PORTS key; rejects the match with -EINVAL when @ip_proto is neither
 * TCP nor UDP, since port numbers are meaningless for other protocols.
 */
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *pkey;
	struct flow_dissector_key_ports *pmask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	pkey = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS, f->key);
	pmask = skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_PORTS, f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(pkey->dst), ntohs(pmask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(pkey->src), ntohs(pmask->src));
	return 0;
}
202
/* Parse an optional TCP-flags match. A no-op when the filter carries no
 * TCP key; rejects the match with -EINVAL when @ip_proto is not TCP.
 */
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *tcp_key;
	struct flow_dissector_key_tcp *tcp_mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	tcp_key = skb_flow_dissector_target(f->dissector,
					    FLOW_DISSECTOR_KEY_TCP, f->key);
	tcp_mask = skb_flow_dissector_target(f->dissector,
					     FLOW_DISSECTOR_KEY_TCP, f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(tcp_key->flags),
				       ntohs(tcp_mask->flags));
	return 0;
}
228
/* Parse an optional IP-header match (TTL and TOS byte). A no-op when the
 * filter carries no IP key; rejects the match for non-IPv4/IPv6
 * ethertypes.
 *
 * Fix: the TOS byte carries DSCP in its upper SIX bits and ECN in its
 * lower two (RFC 2474 / RFC 3168), so DSCP must be extracted with
 * ">> 2". The previous ">> 6" kept only two of the six DSCP bits and
 * programmed wrong DSCP matches into the hardware.
 */
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	/* ECN: lowest two bits of the TOS byte */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP: upper six bits of the TOS byte */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}
261
/* Translate a flower classifier into mlxsw rule info: validate the set of
 * dissector keys against what the device supports, copy each present match
 * into @rulei, then parse the attached actions. Returns 0 on success or a
 * negative errno.
 */
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev, bool ingress,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	/* Reject the filter outright if it uses any dissector key that is
	 * not handled below.
	 */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	/* addr_type decides below whether IPv4 or IPv6 addresses follow. */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		/* ETH_P_ALL means "any protocol" — match with a wildcard
		 * (zero key and mask) instead of a literal ethertype.
		 */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		/* Remember the IP protocol for L4 validation further down. */
		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		/* Only program VID/PCP elements that are actually masked,
		 * to avoid consuming key space for wildcarded fields.
		 */
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	/* Pass only the ethertype bits the filter actually matches on. */
	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
					     rulei, f->exts);
}
384
/* Offload a new flower filter: get (or create) the ruleset for the chain,
 * create a rule keyed by the filter cookie, parse matches and actions into
 * it, commit the rule info and insert the rule into hardware. On success
 * the extra ruleset reference taken here is dropped; the rule itself keeps
 * the ruleset alive. Returns 0 or a negative errno.
 */
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_put_ruleset;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);

	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
	if (err)
		goto err_destroy_rule;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_destroy_rule;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_destroy_rule;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_destroy_rule:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_put_ruleset:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
431
/* Remove an offloaded flower filter. Looks up the rule by the filter
 * cookie; silently does nothing if the ruleset or the rule cannot be
 * found (the filter may never have been offloaded).
 */
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_rule *rule;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress, f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		goto out_put;

	mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);

out_put:
	/* Drop the reference taken by the ruleset_get() above. */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
453
/* Fetch hardware counters for an offloaded flower filter and report them
 * back to TC via tcf_exts_stats_update(). Returns 0 or a negative errno.
 *
 * Fix: the !rule path used to return -EINVAL without dropping the ruleset
 * reference taken by mlxsw_sp_acl_ruleset_get(), leaking it; it now goes
 * through the common put path like the other error case.
 */
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress, f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	/* The ruleset must already exist for a filter being queried. */
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_ruleset_put;
	}

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_ruleset_put;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_ruleset_put:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
489