1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/cls_flower.c Flower classifier
4 *
5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32
33 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
34 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
35 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
36 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
37
38 struct fl_flow_key {
39 struct flow_dissector_key_meta meta;
40 struct flow_dissector_key_control control;
41 struct flow_dissector_key_control enc_control;
42 struct flow_dissector_key_basic basic;
43 struct flow_dissector_key_eth_addrs eth;
44 struct flow_dissector_key_vlan vlan;
45 struct flow_dissector_key_vlan cvlan;
46 union {
47 struct flow_dissector_key_ipv4_addrs ipv4;
48 struct flow_dissector_key_ipv6_addrs ipv6;
49 };
50 struct flow_dissector_key_ports tp;
51 struct flow_dissector_key_icmp icmp;
52 struct flow_dissector_key_arp arp;
53 struct flow_dissector_key_keyid enc_key_id;
54 union {
55 struct flow_dissector_key_ipv4_addrs enc_ipv4;
56 struct flow_dissector_key_ipv6_addrs enc_ipv6;
57 };
58 struct flow_dissector_key_ports enc_tp;
59 struct flow_dissector_key_mpls mpls;
60 struct flow_dissector_key_tcp tcp;
61 struct flow_dissector_key_ip ip;
62 struct flow_dissector_key_ip enc_ip;
63 struct flow_dissector_key_enc_opts enc_opts;
64 union {
65 struct flow_dissector_key_ports tp;
66 struct {
67 struct flow_dissector_key_ports tp_min;
68 struct flow_dissector_key_ports tp_max;
69 };
70 } tp_range;
71 struct flow_dissector_key_ct ct;
72 struct flow_dissector_key_hash hash;
73 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
74
75 struct fl_flow_mask_range {
76 unsigned short int start;
77 unsigned short int end;
78 };
79
80 struct fl_flow_mask {
81 struct fl_flow_key key;
82 struct fl_flow_mask_range range;
83 u32 flags;
84 struct rhash_head ht_node;
85 struct rhashtable ht;
86 struct rhashtable_params filter_ht_params;
87 struct flow_dissector dissector;
88 struct list_head filters;
89 struct rcu_work rwork;
90 struct list_head list;
91 refcount_t refcnt;
92 };
93
94 struct fl_flow_tmplt {
95 struct fl_flow_key dummy_key;
96 struct fl_flow_key mask;
97 struct flow_dissector dissector;
98 struct tcf_chain *chain;
99 };
100
101 struct cls_fl_head {
102 struct rhashtable ht;
103 spinlock_t masks_lock; /* Protect masks list */
104 struct list_head masks;
105 struct list_head hw_filters;
106 struct rcu_work rwork;
107 struct idr handle_idr;
108 };
109
110 struct cls_fl_filter {
111 struct fl_flow_mask *mask;
112 struct rhash_head ht_node;
113 struct fl_flow_key mkey;
114 struct tcf_exts exts;
115 struct tcf_result res;
116 struct fl_flow_key key;
117 struct list_head list;
118 struct list_head hw_list;
119 u32 handle;
120 u32 flags;
121 u32 in_hw_count;
122 struct rcu_work rwork;
123 struct net_device *hw_dev;
124 /* Flower classifier is unlocked, which means that its reference counter
125 * can be changed concurrently without any kind of external
126 * synchronization. Use atomic reference counter to be concurrency-safe.
127 */
128 refcount_t refcnt;
129 bool deleted;
130 };
131
132 static const struct rhashtable_params mask_ht_params = {
133 .key_offset = offsetof(struct fl_flow_mask, key),
134 .key_len = sizeof(struct fl_flow_key),
135 .head_offset = offsetof(struct fl_flow_mask, ht_node),
136 .automatic_shrinking = true,
137 };
138
139 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
140 {
141 return mask->range.end - mask->range.start;
142 }
143
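/* Compute the smallest [start, end) byte range that covers every non-zero
 * byte of the mask, rounded out to long boundaries. Lookups and masked-key
 * copies only touch this range and operate on whole longs, which is why
 * struct fl_flow_key is __aligned(BITS_PER_LONG / 8) above.
 */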
144 static void fl_mask_update_range(struct fl_flow_mask *mask)
145 {
146 const u8 *bytes = (const u8 *) &mask->key;
147 size_t size = sizeof(mask->key);
148 size_t i, first = 0, last;
149
150 for (i = 0; i < size; i++) {
151 if (bytes[i]) {
152 first = i;
153 break;
154 }
155 }
156 last = first;
157 for (i = size - 1; i != first; i--) {
158 if (bytes[i]) {
159 last = i;
160 break;
161 }
162 }
163 mask->range.start = rounddown(first, sizeof(long));
164 mask->range.end = roundup(last + 1, sizeof(long));
165 }
166
167 static void *fl_key_get_start(struct fl_flow_key *key,
168 const struct fl_flow_mask *mask)
169 {
170 return (u8 *) key + mask->range.start;
171 }
172
173 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
174 struct fl_flow_mask *mask)
175 {
176 const long *lkey = fl_key_get_start(key, mask);
177 const long *lmask = fl_key_get_start(&mask->key, mask);
178 long *lmkey = fl_key_get_start(mkey, mask);
179 int i;
180
181 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
182 *lmkey++ = *lkey++ & *lmask++;
183 }
184
185 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
186 struct fl_flow_mask *mask)
187 {
188 const long *lmask = fl_key_get_start(&mask->key, mask);
189 const long *ltmplt;
190 int i;
191
192 if (!tmplt)
193 return true;
194 ltmplt = fl_key_get_start(&tmplt->mask, mask);
195 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
196 if (~*ltmplt++ & *lmask++)
197 return false;
198 }
199 return true;
200 }
201
202 static void fl_clear_masked_range(struct fl_flow_key *key,
203 struct fl_flow_mask *mask)
204 {
205 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
206 }
207
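/* For port-range filters the packet carries only a single port value, so the
 * two helpers below first check that the packet's port lies within the
 * filter's [min, max] window and then copy the filter's masked min/max into
 * the lookup key (mkey) so the subsequent hash lookup can match the entry.
 */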
208 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
209 struct fl_flow_key *key,
210 struct fl_flow_key *mkey)
211 {
212 u16 min_mask, max_mask, min_val, max_val;
213
214 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
215 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
216 min_val = ntohs(filter->key.tp_range.tp_min.dst);
217 max_val = ntohs(filter->key.tp_range.tp_max.dst);
218
219 if (min_mask && max_mask) {
220 if (ntohs(key->tp_range.tp.dst) < min_val ||
221 ntohs(key->tp_range.tp.dst) > max_val)
222 return false;
223
224 /* skb does not have min and max values */
225 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
226 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
227 }
228 return true;
229 }
230
231 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
232 struct fl_flow_key *key,
233 struct fl_flow_key *mkey)
234 {
235 u16 min_mask, max_mask, min_val, max_val;
236
237 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
238 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
239 min_val = ntohs(filter->key.tp_range.tp_min.src);
240 max_val = ntohs(filter->key.tp_range.tp_max.src);
241
242 if (min_mask && max_mask) {
243 if (ntohs(key->tp_range.tp.src) < min_val ||
244 ntohs(key->tp_range.tp.src) > max_val)
245 return false;
246
247 /* skb does not have min and max values */
248 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
249 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
250 }
251 return true;
252 }
253
254 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
255 struct fl_flow_key *mkey)
256 {
257 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
258 mask->filter_ht_params);
259 }
260
261 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
262 struct fl_flow_key *mkey,
263 struct fl_flow_key *key)
264 {
265 struct cls_fl_filter *filter, *f;
266
267 list_for_each_entry_rcu(filter, &mask->filters, list) {
268 if (!fl_range_port_dst_cmp(filter, key, mkey))
269 continue;
270
271 if (!fl_range_port_src_cmp(filter, key, mkey))
272 continue;
273
274 f = __fl_lookup(mask, mkey);
275 if (f)
276 return f;
277 }
278 return NULL;
279 }
280
281 static noinline_for_stack
282 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
283 {
284 struct fl_flow_key mkey;
285
286 fl_set_masked_key(&mkey, key, mask);
287 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
288 return fl_lookup_range(mask, &mkey, key);
289
290 return __fl_lookup(mask, &mkey);
291 }
292
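/* Map conntrack info states (enum ip_conntrack_info) to the
 * TCA_FLOWER_KEY_CT_FLAGS_* bits exposed to userspace; reply-direction
 * states map to the same ESTABLISHED/RELATED flags as the original
 * direction.
 */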
293 static u16 fl_ct_info_to_flower_map[] = {
294 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
296 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
297 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
298 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
299 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
300 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
301 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
302 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
303 TCA_FLOWER_KEY_CT_FLAGS_NEW,
304 };
305
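/* Classification walks the masks in list order. For each mask, only the
 * fields covered by that mask's dissector are extracted from the skb, the
 * result is masked, and a lookup is done in the per-mask filter hash table.
 * The first match that is not skip_sw has its actions executed.
 */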
306 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
307 struct tcf_result *res)
308 {
309 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
310 struct fl_flow_key skb_key;
311 struct fl_flow_mask *mask;
312 struct cls_fl_filter *f;
313
314 list_for_each_entry_rcu(mask, &head->masks, list) {
315 flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
316 fl_clear_masked_range(&skb_key, mask);
317
318 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
319 /* skb_flow_dissect() does not set n_proto in case of an unknown
320 * protocol, so set it here.
321 */
322 skb_key.basic.n_proto = skb_protocol(skb, false);
323 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
324 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
325 fl_ct_info_to_flower_map,
326 ARRAY_SIZE(fl_ct_info_to_flower_map));
327 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
328 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
329
330 f = fl_mask_lookup(mask, &skb_key);
331 if (f && !tc_skip_sw(f->flags)) {
332 *res = f->res;
333 return tcf_exts_exec(skb, &f->exts, res);
334 }
335 }
336 return -1;
337 }
338
339 static int fl_init(struct tcf_proto *tp)
340 {
341 struct cls_fl_head *head;
342
343 head = kzalloc(sizeof(*head), GFP_KERNEL);
344 if (!head)
345 return -ENOBUFS;
346
347 spin_lock_init(&head->masks_lock);
348 INIT_LIST_HEAD_RCU(&head->masks);
349 INIT_LIST_HEAD(&head->hw_filters);
350 rcu_assign_pointer(tp->root, head);
351 idr_init(&head->handle_idr);
352
353 return rhashtable_init(&head->ht, &mask_ht_params);
354 }
355
356 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
357 {
358 /* temporary masks don't have their filters list and ht initialized */
359 if (mask_init_done) {
360 WARN_ON(!list_empty(&mask->filters));
361 rhashtable_destroy(&mask->ht);
362 }
363 kfree(mask);
364 }
365
366 static void fl_mask_free_work(struct work_struct *work)
367 {
368 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
369 struct fl_flow_mask, rwork);
370
371 fl_mask_free(mask, true);
372 }
373
374 static void fl_uninit_mask_free_work(struct work_struct *work)
375 {
376 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
377 struct fl_flow_mask, rwork);
378
379 fl_mask_free(mask, false);
380 }
381
382 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
383 {
384 if (!refcount_dec_and_test(&mask->refcnt))
385 return false;
386
387 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
388
389 spin_lock(&head->masks_lock);
390 list_del_rcu(&mask->list);
391 spin_unlock(&head->masks_lock);
392
393 tcf_queue_work(&mask->rwork, fl_mask_free_work);
394
395 return true;
396 }
397
398 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
399 {
400 /* Flower classifier only changes root pointer during init and destroy.
401 * Users must obtain reference to tcf_proto instance before calling its
402 * API, so tp->root pointer is protected from concurrent call to
403 * fl_destroy() by reference counting.
404 */
405 return rcu_dereference_raw(tp->root);
406 }
407
408 static void __fl_destroy_filter(struct cls_fl_filter *f)
409 {
410 tcf_exts_destroy(&f->exts);
411 tcf_exts_put_net(&f->exts);
412 kfree(f);
413 }
414
415 static void fl_destroy_filter_work(struct work_struct *work)
416 {
417 struct cls_fl_filter *f = container_of(to_rcu_work(work),
418 struct cls_fl_filter, rwork);
419
420 __fl_destroy_filter(f);
421 }
422
423 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
424 bool rtnl_held, struct netlink_ext_ack *extack)
425 {
426 struct tcf_block *block = tp->chain->block;
427 struct flow_cls_offload cls_flower = {};
428
429 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
430 cls_flower.command = FLOW_CLS_DESTROY;
431 cls_flower.cookie = (unsigned long) f;
432
433 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
434 &f->flags, &f->in_hw_count, rtnl_held);
435
436 }
437
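/* Offload path: the filter's match (dissector, mask, masked key) and its
 * actions are packaged into a flow_cls_offload and pushed to the block's
 * offload callbacks. A skip_sw filter fails if no hardware accepted the
 * rule; otherwise the software path remains as fallback.
 */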
438 static int fl_hw_replace_filter(struct tcf_proto *tp,
439 struct cls_fl_filter *f, bool rtnl_held,
440 struct netlink_ext_ack *extack)
441 {
442 struct tcf_block *block = tp->chain->block;
443 struct flow_cls_offload cls_flower = {};
444 bool skip_sw = tc_skip_sw(f->flags);
445 int err = 0;
446
447 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
448 if (!cls_flower.rule)
449 return -ENOMEM;
450
451 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
452 cls_flower.command = FLOW_CLS_REPLACE;
453 cls_flower.cookie = (unsigned long) f;
454 cls_flower.rule->match.dissector = &f->mask->dissector;
455 cls_flower.rule->match.mask = &f->mask->key;
456 cls_flower.rule->match.key = &f->mkey;
457 cls_flower.classid = f->res.classid;
458
459 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
460 if (err) {
461 kfree(cls_flower.rule);
462 if (skip_sw) {
463 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
464 return err;
465 }
466 return 0;
467 }
468
469 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
470 skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
471 tc_cleanup_flow_action(&cls_flower.rule->action);
472 kfree(cls_flower.rule);
473
474 if (err) {
475 fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
476 return err;
477 }
478
479 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
480 return -EINVAL;
481
482 return 0;
483 }
484
485 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
486 bool rtnl_held)
487 {
488 struct tcf_block *block = tp->chain->block;
489 struct flow_cls_offload cls_flower = {};
490
491 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
492 cls_flower.command = FLOW_CLS_STATS;
493 cls_flower.cookie = (unsigned long) f;
494 cls_flower.classid = f->res.classid;
495
496 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
497 rtnl_held);
498
499 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
500 cls_flower.stats.pkts,
501 cls_flower.stats.drops,
502 cls_flower.stats.lastused,
503 cls_flower.stats.used_hw_stats,
504 cls_flower.stats.used_hw_stats_valid);
505 }
506
507 static void __fl_put(struct cls_fl_filter *f)
508 {
509 if (!refcount_dec_and_test(&f->refcnt))
510 return;
511
512 if (tcf_exts_get_net(&f->exts))
513 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
514 else
515 __fl_destroy_filter(f);
516 }
517
518 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
519 {
520 struct cls_fl_filter *f;
521
522 rcu_read_lock();
523 f = idr_find(&head->handle_idr, handle);
524 if (f && !refcount_inc_not_zero(&f->refcnt))
525 f = NULL;
526 rcu_read_unlock();
527
528 return f;
529 }
530
531 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
532 bool *last, bool rtnl_held,
533 struct netlink_ext_ack *extack)
534 {
535 struct cls_fl_head *head = fl_head_dereference(tp);
536
537 *last = false;
538
539 spin_lock(&tp->lock);
540 if (f->deleted) {
541 spin_unlock(&tp->lock);
542 return -ENOENT;
543 }
544
545 f->deleted = true;
546 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
547 f->mask->filter_ht_params);
548 idr_remove(&head->handle_idr, f->handle);
549 list_del_rcu(&f->list);
550 spin_unlock(&tp->lock);
551
552 *last = fl_mask_put(head, f->mask);
553 if (!tc_skip_hw(f->flags))
554 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
555 tcf_unbind_filter(tp, &f->res);
556 __fl_put(f);
557
558 return 0;
559 }
560
561 static void fl_destroy_sleepable(struct work_struct *work)
562 {
563 struct cls_fl_head *head = container_of(to_rcu_work(work),
564 struct cls_fl_head,
565 rwork);
566
567 rhashtable_destroy(&head->ht);
568 kfree(head);
569 module_put(THIS_MODULE);
570 }
571
572 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
573 struct netlink_ext_ack *extack)
574 {
575 struct cls_fl_head *head = fl_head_dereference(tp);
576 struct fl_flow_mask *mask, *next_mask;
577 struct cls_fl_filter *f, *next;
578 bool last;
579
580 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
581 list_for_each_entry_safe(f, next, &mask->filters, list) {
582 __fl_delete(tp, f, &last, rtnl_held, extack);
583 if (last)
584 break;
585 }
586 }
587 idr_destroy(&head->handle_idr);
588
589 __module_get(THIS_MODULE);
590 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
591 }
592
593 static void fl_put(struct tcf_proto *tp, void *arg)
594 {
595 struct cls_fl_filter *f = arg;
596
597 __fl_put(f);
598 }
599
600 static void *fl_get(struct tcf_proto *tp, u32 handle)
601 {
602 struct cls_fl_head *head = fl_head_dereference(tp);
603
604 return __fl_get(head, handle);
605 }
606
607 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
608 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
609 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
610 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
611 .len = IFNAMSIZ },
612 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
613 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
614 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
615 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
616 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
617 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
618 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
619 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
620 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
621 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
622 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
623 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
624 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
625 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
626 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
627 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
628 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
629 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
630 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
631 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
632 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
633 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
634 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
635 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
636 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
637 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
638 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
639 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
640 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
641 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
642 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
643 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
644 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
645 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
646 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
647 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
648 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
649 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
650 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
651 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
652 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
653 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
654 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
655 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
656 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
657 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
658 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
659 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
660 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
661 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
662 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
663 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
664 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
665 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
666 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
667 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
668 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
669 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
670 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
671 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
672 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
673 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
674 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
675 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
676 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
677 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
678 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
679 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
680 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
681 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
682 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
683 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
684 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
686 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
688 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
689 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
690 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
693 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
694 [TCA_FLOWER_KEY_CT_STATE] =
695 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
696 [TCA_FLOWER_KEY_CT_STATE_MASK] =
697 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
698 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
699 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
700 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
701 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
702 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
703 .len = 128 / BITS_PER_BYTE },
704 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
705 .len = 128 / BITS_PER_BYTE },
706 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
707 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
708 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
709
710 };
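
/* Example: a minimal flower filter exercising a few of the attributes above,
 * assuming a device named "eth0" with an ingress/clsact qdisc already in
 * place (illustrative only):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */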
711
712 static const struct nla_policy
713 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
714 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
715 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
716 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
717 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
718 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
719 };
720
721 static const struct nla_policy
722 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
723 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
724 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
725 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
726 .len = 128 },
727 };
728
729 static const struct nla_policy
730 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
731 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
732 };
733
734 static const struct nla_policy
735 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
736 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
737 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
738 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
739 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
740 };
741
742 static const struct nla_policy
743 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
744 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
745 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
746 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
747 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
748 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
749 };
750
751 static void fl_set_key_val(struct nlattr **tb,
752 void *val, int val_type,
753 void *mask, int mask_type, int len)
754 {
755 if (!tb[val_type])
756 return;
757 nla_memcpy(val, tb[val_type], len);
758 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
759 memset(mask, 0xff, len);
760 else
761 nla_memcpy(mask, tb[mask_type], len);
762 }
763
764 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
765 struct fl_flow_key *mask,
766 struct netlink_ext_ack *extack)
767 {
768 fl_set_key_val(tb, &key->tp_range.tp_min.dst,
769 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
770 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
771 fl_set_key_val(tb, &key->tp_range.tp_max.dst,
772 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
773 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
774 fl_set_key_val(tb, &key->tp_range.tp_min.src,
775 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
776 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
777 fl_set_key_val(tb, &key->tp_range.tp_max.src,
778 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
779 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
780
781 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
782 ntohs(key->tp_range.tp_max.dst) <=
783 ntohs(key->tp_range.tp_min.dst)) {
784 NL_SET_ERR_MSG_ATTR(extack,
785 tb[TCA_FLOWER_KEY_PORT_DST_MIN],
786 "Invalid destination port range (min must be strictly smaller than max)");
787 return -EINVAL;
788 }
789 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
790 ntohs(key->tp_range.tp_max.src) <=
791 ntohs(key->tp_range.tp_min.src)) {
792 NL_SET_ERR_MSG_ATTR(extack,
793 tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
794 "Invalid source port range (min must be strictly smaller than max)");
795 return -EINVAL;
796 }
797
798 return 0;
799 }
800
801 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
802 struct flow_dissector_key_mpls *key_val,
803 struct flow_dissector_key_mpls *key_mask,
804 struct netlink_ext_ack *extack)
805 {
806 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
807 struct flow_dissector_mpls_lse *lse_mask;
808 struct flow_dissector_mpls_lse *lse_val;
809 u8 lse_index;
810 u8 depth;
811 int err;
812
813 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
814 mpls_stack_entry_policy, extack);
815 if (err < 0)
816 return err;
817
818 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
819 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
820 return -EINVAL;
821 }
822
823 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
824
825 /* LSE depth starts at 1, for consistency with terminology used by
826 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
827 */
828 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
829 NL_SET_ERR_MSG_ATTR(extack,
830 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
831 "Invalid MPLS depth");
832 return -EINVAL;
833 }
834 lse_index = depth - 1;
835
836 dissector_set_mpls_lse(key_val, lse_index);
837 dissector_set_mpls_lse(key_mask, lse_index);
838
839 lse_val = &key_val->ls[lse_index];
840 lse_mask = &key_mask->ls[lse_index];
841
842 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
843 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
844 lse_mask->mpls_ttl = MPLS_TTL_MASK;
845 }
846 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
847 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
848
849 if (bos & ~MPLS_BOS_MASK) {
850 NL_SET_ERR_MSG_ATTR(extack,
851 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
852 "Bottom Of Stack (BOS) must be 0 or 1");
853 return -EINVAL;
854 }
855 lse_val->mpls_bos = bos;
856 lse_mask->mpls_bos = MPLS_BOS_MASK;
857 }
858 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
859 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
860
861 if (tc & ~MPLS_TC_MASK) {
862 NL_SET_ERR_MSG_ATTR(extack,
863 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
864 "Traffic Class (TC) must be between 0 and 7");
865 return -EINVAL;
866 }
867 lse_val->mpls_tc = tc;
868 lse_mask->mpls_tc = MPLS_TC_MASK;
869 }
870 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
871 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
872
873 if (label & ~MPLS_LABEL_MASK) {
874 NL_SET_ERR_MSG_ATTR(extack,
875 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
876 "Label must be between 0 and 1048575");
877 return -EINVAL;
878 }
879 lse_val->mpls_label = label;
880 lse_mask->mpls_label = MPLS_LABEL_MASK;
881 }
882
883 return 0;
884 }
885
886 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
887 struct flow_dissector_key_mpls *key_val,
888 struct flow_dissector_key_mpls *key_mask,
889 struct netlink_ext_ack *extack)
890 {
891 struct nlattr *nla_lse;
892 int rem;
893 int err;
894
895 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
896 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
897 "NLA_F_NESTED is missing");
898 return -EINVAL;
899 }
900
901 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
902 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
903 NL_SET_ERR_MSG_ATTR(extack, nla_lse,
904 "Invalid MPLS option type");
905 return -EINVAL;
906 }
907
908 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
909 if (err < 0)
910 return err;
911 }
912 if (rem) {
913 NL_SET_ERR_MSG(extack,
914 "Bytes leftover after parsing MPLS options");
915 return -EINVAL;
916 }
917
918 return 0;
919 }
920
921 static int fl_set_key_mpls(struct nlattr **tb,
922 struct flow_dissector_key_mpls *key_val,
923 struct flow_dissector_key_mpls *key_mask,
924 struct netlink_ext_ack *extack)
925 {
926 struct flow_dissector_mpls_lse *lse_mask;
927 struct flow_dissector_mpls_lse *lse_val;
928
929 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
930 if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
931 tb[TCA_FLOWER_KEY_MPLS_BOS] ||
932 tb[TCA_FLOWER_KEY_MPLS_TC] ||
933 tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
934 NL_SET_ERR_MSG_ATTR(extack,
935 tb[TCA_FLOWER_KEY_MPLS_OPTS],
936 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
937 return -EBADMSG;
938 }
939
940 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
941 key_val, key_mask, extack);
942 }
943
944 lse_val = &key_val->ls[0];
945 lse_mask = &key_mask->ls[0];
946
947 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
948 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
949 lse_mask->mpls_ttl = MPLS_TTL_MASK;
950 dissector_set_mpls_lse(key_val, 0);
951 dissector_set_mpls_lse(key_mask, 0);
952 }
953 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
954 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
955
956 if (bos & ~MPLS_BOS_MASK) {
957 NL_SET_ERR_MSG_ATTR(extack,
958 tb[TCA_FLOWER_KEY_MPLS_BOS],
959 "Bottom Of Stack (BOS) must be 0 or 1");
960 return -EINVAL;
961 }
962 lse_val->mpls_bos = bos;
963 lse_mask->mpls_bos = MPLS_BOS_MASK;
964 dissector_set_mpls_lse(key_val, 0);
965 dissector_set_mpls_lse(key_mask, 0);
966 }
967 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
968 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
969
970 if (tc & ~MPLS_TC_MASK) {
971 NL_SET_ERR_MSG_ATTR(extack,
972 tb[TCA_FLOWER_KEY_MPLS_TC],
973 "Traffic Class (TC) must be between 0 and 7");
974 return -EINVAL;
975 }
976 lse_val->mpls_tc = tc;
977 lse_mask->mpls_tc = MPLS_TC_MASK;
978 dissector_set_mpls_lse(key_val, 0);
979 dissector_set_mpls_lse(key_mask, 0);
980 }
981 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
982 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
983
984 if (label & ~MPLS_LABEL_MASK) {
985 NL_SET_ERR_MSG_ATTR(extack,
986 tb[TCA_FLOWER_KEY_MPLS_LABEL],
987 "Label must be between 0 and 1048575");
988 return -EINVAL;
989 }
990 lse_val->mpls_label = label;
991 lse_mask->mpls_label = MPLS_LABEL_MASK;
992 dissector_set_mpls_lse(key_val, 0);
993 dissector_set_mpls_lse(key_mask, 0);
994 }
995 return 0;
996 }
997
998 static void fl_set_key_vlan(struct nlattr **tb,
999 __be16 ethertype,
1000 int vlan_id_key, int vlan_prio_key,
1001 int vlan_next_eth_type_key,
1002 struct flow_dissector_key_vlan *key_val,
1003 struct flow_dissector_key_vlan *key_mask)
1004 {
1005 #define VLAN_PRIORITY_MASK 0x7
1006
1007 if (tb[vlan_id_key]) {
1008 key_val->vlan_id =
1009 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1010 key_mask->vlan_id = VLAN_VID_MASK;
1011 }
1012 if (tb[vlan_prio_key]) {
1013 key_val->vlan_priority =
1014 nla_get_u8(tb[vlan_prio_key]) &
1015 VLAN_PRIORITY_MASK;
1016 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1017 }
1018 key_val->vlan_tpid = ethertype;
1019 key_mask->vlan_tpid = cpu_to_be16(~0);
1020 if (tb[vlan_next_eth_type_key]) {
1021 key_val->vlan_eth_type =
1022 nla_get_be16(tb[vlan_next_eth_type_key]);
1023 key_mask->vlan_eth_type = cpu_to_be16(~0);
1024 }
1025 }
1026
1027 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1028 u32 *dissector_key, u32 *dissector_mask,
1029 u32 flower_flag_bit, u32 dissector_flag_bit)
1030 {
1031 if (flower_mask & flower_flag_bit) {
1032 *dissector_mask |= dissector_flag_bit;
1033 if (flower_key & flower_flag_bit)
1034 *dissector_key |= dissector_flag_bit;
1035 }
1036 }
1037
1038 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1039 u32 *flags_mask, struct netlink_ext_ack *extack)
1040 {
1041 u32 key, mask;
1042
1043 /* mask is mandatory for flags */
1044 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1045 NL_SET_ERR_MSG(extack, "Missing flags mask");
1046 return -EINVAL;
1047 }
1048
1049 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
1050 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1051
1052 *flags_key = 0;
1053 *flags_mask = 0;
1054
1055 fl_set_key_flag(key, mask, flags_key, flags_mask,
1056 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1057 fl_set_key_flag(key, mask, flags_key, flags_mask,
1058 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1059 FLOW_DIS_FIRST_FRAG);
1060
1061 return 0;
1062 }
1063
1064 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1065 struct flow_dissector_key_ip *key,
1066 struct flow_dissector_key_ip *mask)
1067 {
1068 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1069 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1070 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1071 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1072
1073 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1074 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1075 }
1076
1077 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1078 int depth, int option_len,
1079 struct netlink_ext_ack *extack)
1080 {
1081 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1082 struct nlattr *class = NULL, *type = NULL, *data = NULL;
1083 struct geneve_opt *opt;
1084 int err, data_len = 0;
1085
1086 if (option_len > sizeof(struct geneve_opt))
1087 data_len = option_len - sizeof(struct geneve_opt);
1088
1089 if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1090 return -ERANGE;
1091
1092 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1093 memset(opt, 0xff, option_len);
1094 opt->length = data_len / 4;
1095 opt->r1 = 0;
1096 opt->r2 = 0;
1097 opt->r3 = 0;
1098
1099 /* If no mask has been provided we assume an exact match. */
1100 if (!depth)
1101 return sizeof(struct geneve_opt) + data_len;
1102
1103 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1104 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1105 return -EINVAL;
1106 }
1107
1108 err = nla_parse_nested_deprecated(tb,
1109 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1110 nla, geneve_opt_policy, extack);
1111 if (err < 0)
1112 return err;
1113
1114 /* We are not allowed to omit any of CLASS, TYPE or DATA
1115 * fields from the key.
1116 */
1117 if (!option_len &&
1118 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1119 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1120 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1121 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1122 return -EINVAL;
1123 }
1124
1125 /* Omitting any of CLASS, TYPE or DATA fields is allowed
1126 * for the mask.
1127 */
1128 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1129 int new_len = key->enc_opts.len;
1130
1131 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1132 data_len = nla_len(data);
1133 if (data_len < 4) {
1134 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1135 return -ERANGE;
1136 }
1137 if (data_len % 4) {
1138 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1139 return -ERANGE;
1140 }
1141
1142 new_len += sizeof(struct geneve_opt) + data_len;
1143 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1144 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1145 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1146 return -ERANGE;
1147 }
1148 opt->length = data_len / 4;
1149 memcpy(opt->opt_data, nla_data(data), data_len);
1150 }
1151
1152 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1153 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1154 opt->opt_class = nla_get_be16(class);
1155 }
1156
1157 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1158 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1159 opt->type = nla_get_u8(type);
1160 }
1161
1162 return sizeof(struct geneve_opt) + data_len;
1163 }
1164
1165 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1166 int depth, int option_len,
1167 struct netlink_ext_ack *extack)
1168 {
1169 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1170 struct vxlan_metadata *md;
1171 int err;
1172
1173 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1174 memset(md, 0xff, sizeof(*md));
1175
1176 if (!depth)
1177 return sizeof(*md);
1178
1179 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1180 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1181 return -EINVAL;
1182 }
1183
1184 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1185 vxlan_opt_policy, extack);
1186 if (err < 0)
1187 return err;
1188
1189 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1190 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1191 return -EINVAL;
1192 }
1193
1194 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1195 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1196 md->gbp &= VXLAN_GBP_MASK;
1197 }
1198
1199 return sizeof(*md);
1200 }
1201
1202 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1203 int depth, int option_len,
1204 struct netlink_ext_ack *extack)
1205 {
1206 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1207 struct erspan_metadata *md;
1208 int err;
1209
1210 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1211 memset(md, 0xff, sizeof(*md));
1212 md->version = 1;
1213
1214 if (!depth)
1215 return sizeof(*md);
1216
1217 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1218 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1219 return -EINVAL;
1220 }
1221
1222 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1223 erspan_opt_policy, extack);
1224 if (err < 0)
1225 return err;
1226
1227 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1228 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1229 return -EINVAL;
1230 }
1231
1232 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1233 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1234
1235 if (md->version == 1) {
1236 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1237 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1238 return -EINVAL;
1239 }
1240 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1241 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1242 memset(&md->u, 0x00, sizeof(md->u));
1243 md->u.index = nla_get_be32(nla);
1244 }
1245 } else if (md->version == 2) {
1246 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1247 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1248 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1249 return -EINVAL;
1250 }
1251 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1252 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1253 md->u.md2.dir = nla_get_u8(nla);
1254 }
1255 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1256 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1257 set_hwid(&md->u.md2, nla_get_u8(nla));
1258 }
1259 } else {
1260 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1261 return -EINVAL;
1262 }
1263
1264 return sizeof(*md);
1265 }
1266
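/* Parse the nested tunnel key options together with, in lock-step, the
 * corresponding mask options. Each option type (geneve, vxlan, erspan) is
 * parsed once for the key and once for the mask, and the accumulated key
 * and mask option lengths must end up equal.
 */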
1267 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1268 struct fl_flow_key *mask,
1269 struct netlink_ext_ack *extack)
1270 {
1271 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1272 int err, option_len, key_depth, msk_depth = 0;
1273
1274 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1275 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1276 enc_opts_policy, extack);
1277 if (err)
1278 return err;
1279
1280 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1281
1282 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1283 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1284 TCA_FLOWER_KEY_ENC_OPTS_MAX,
1285 enc_opts_policy, extack);
1286 if (err)
1287 return err;
1288
1289 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1290 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1291 if (!nla_ok(nla_opt_msk, msk_depth)) {
1292 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1293 return -EINVAL;
1294 }
1295 }
1296
1297 nla_for_each_attr(nla_opt_key, nla_enc_key,
1298 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1299 switch (nla_type(nla_opt_key)) {
1300 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1301 if (key->enc_opts.dst_opt_type &&
1302 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1303 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1304 return -EINVAL;
1305 }
1306 option_len = 0;
1307 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1308 option_len = fl_set_geneve_opt(nla_opt_key, key,
1309 key_depth, option_len,
1310 extack);
1311 if (option_len < 0)
1312 return option_len;
1313
1314 key->enc_opts.len += option_len;
1315 /* At the same time we need to parse through the mask
1316 * in order to verify exact and mask attribute lengths.
1317 */
1318 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1319 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1320 msk_depth, option_len,
1321 extack);
1322 if (option_len < 0)
1323 return option_len;
1324
1325 mask->enc_opts.len += option_len;
1326 if (key->enc_opts.len != mask->enc_opts.len) {
1327 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1328 return -EINVAL;
1329 }
1330 break;
1331 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1332 if (key->enc_opts.dst_opt_type) {
1333 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1334 return -EINVAL;
1335 }
1336 option_len = 0;
1337 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1338 option_len = fl_set_vxlan_opt(nla_opt_key, key,
1339 key_depth, option_len,
1340 extack);
1341 if (option_len < 0)
1342 return option_len;
1343
1344 key->enc_opts.len += option_len;
1345 /* At the same time we need to parse through the mask
1346 * in order to verify exact and mask attribute lengths.
1347 */
1348 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1349 option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1350 msk_depth, option_len,
1351 extack);
1352 if (option_len < 0)
1353 return option_len;
1354
1355 mask->enc_opts.len += option_len;
1356 if (key->enc_opts.len != mask->enc_opts.len) {
1357 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1358 return -EINVAL;
1359 }
1360 break;
1361 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1362 if (key->enc_opts.dst_opt_type) {
1363 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1364 return -EINVAL;
1365 }
1366 option_len = 0;
1367 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1368 option_len = fl_set_erspan_opt(nla_opt_key, key,
1369 key_depth, option_len,
1370 extack);
1371 if (option_len < 0)
1372 return option_len;
1373
1374 key->enc_opts.len += option_len;
1375 /* At the same time we need to parse through the mask
1376 * in order to verify exact and mask attribute lengths.
1377 */
1378 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1379 option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1380 msk_depth, option_len,
1381 extack);
1382 if (option_len < 0)
1383 return option_len;
1384
1385 mask->enc_opts.len += option_len;
1386 if (key->enc_opts.len != mask->enc_opts.len) {
1387 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1388 return -EINVAL;
1389 }
1390 break;
1391 default:
1392 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1393 return -EINVAL;
1394 }
1395
1396 if (!msk_depth)
1397 continue;
1398
1399 if (!nla_ok(nla_opt_msk, msk_depth)) {
1400 NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1401 return -EINVAL;
1402 }
1403 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1404 }
1405
1406 return 0;
1407 }
1408
1409 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1410 struct netlink_ext_ack *extack)
1411 {
1412 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1413 NL_SET_ERR_MSG_ATTR(extack, tb,
1414 "no trk, so no other flag can be set");
1415 return -EINVAL;
1416 }
1417
1418 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1419 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1420 NL_SET_ERR_MSG_ATTR(extack, tb,
1421 "new and est are mutually exclusive");
1422 return -EINVAL;
1423 }
1424
1425 return 0;
1426 }
1427
1428 static int fl_set_key_ct(struct nlattr **tb,
1429 struct flow_dissector_key_ct *key,
1430 struct flow_dissector_key_ct *mask,
1431 struct netlink_ext_ack *extack)
1432 {
1433 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1434 int err;
1435
1436 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1437 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1438 return -EOPNOTSUPP;
1439 }
1440 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1441 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1442 sizeof(key->ct_state));
1443
1444 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1445 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1446 extack);
1447 if (err)
1448 return err;
1449
1450 }
1451 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1452 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1453 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1454 return -EOPNOTSUPP;
1455 }
1456 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1457 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1458 sizeof(key->ct_zone));
1459 }
1460 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1461 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1462 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1463 return -EOPNOTSUPP;
1464 }
1465 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1466 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1467 sizeof(key->ct_mark));
1468 }
1469 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1470 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1471 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1472 return -EOPNOTSUPP;
1473 }
1474 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1475 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1476 sizeof(key->ct_labels));
1477 }
1478
1479 return 0;
1480 }
1481
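/* fl_set_key() translates the TCA_FLOWER_* netlink attributes into a
 * key/mask pair using the helpers above; when a mask attribute is absent,
 * fl_set_key_val() defaults to an exact match on the supplied key value.
 */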
1482 static int fl_set_key(struct net *net, struct nlattr **tb,
1483 struct fl_flow_key *key, struct fl_flow_key *mask,
1484 struct netlink_ext_ack *extack)
1485 {
1486 __be16 ethertype;
1487 int ret = 0;
1488
1489 if (tb[TCA_FLOWER_INDEV]) {
1490 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1491 if (err < 0)
1492 return err;
1493 key->meta.ingress_ifindex = err;
1494 mask->meta.ingress_ifindex = 0xffffffff;
1495 }
1496
1497 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1498 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1499 sizeof(key->eth.dst));
1500 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1501 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1502 sizeof(key->eth.src));
1503
1504 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1505 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1506
1507 if (eth_type_vlan(ethertype)) {
1508 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1509 TCA_FLOWER_KEY_VLAN_PRIO,
1510 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1511 &key->vlan, &mask->vlan);
1512
1513 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1514 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1515 if (eth_type_vlan(ethertype)) {
1516 fl_set_key_vlan(tb, ethertype,
1517 TCA_FLOWER_KEY_CVLAN_ID,
1518 TCA_FLOWER_KEY_CVLAN_PRIO,
1519 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1520 &key->cvlan, &mask->cvlan);
1521 fl_set_key_val(tb, &key->basic.n_proto,
1522 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1523 &mask->basic.n_proto,
1524 TCA_FLOWER_UNSPEC,
1525 sizeof(key->basic.n_proto));
1526 } else {
1527 key->basic.n_proto = ethertype;
1528 mask->basic.n_proto = cpu_to_be16(~0);
1529 }
1530 }
1531 } else {
1532 key->basic.n_proto = ethertype;
1533 mask->basic.n_proto = cpu_to_be16(~0);
1534 }
1535 }
1536
1537 if (key->basic.n_proto == htons(ETH_P_IP) ||
1538 key->basic.n_proto == htons(ETH_P_IPV6)) {
1539 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1540 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1541 sizeof(key->basic.ip_proto));
1542 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1543 }
1544
1545 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1546 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1547 mask->control.addr_type = ~0;
1548 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1549 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1550 sizeof(key->ipv4.src));
1551 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1552 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1553 sizeof(key->ipv4.dst));
1554 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1555 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1556 mask->control.addr_type = ~0;
1557 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1558 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1559 sizeof(key->ipv6.src));
1560 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1561 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1562 sizeof(key->ipv6.dst));
1563 }
1564
1565 if (key->basic.ip_proto == IPPROTO_TCP) {
1566 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1567 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1568 sizeof(key->tp.src));
1569 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1570 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1571 sizeof(key->tp.dst));
1572 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1573 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1574 sizeof(key->tcp.flags));
1575 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1576 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1577 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1578 sizeof(key->tp.src));
1579 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1580 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1581 sizeof(key->tp.dst));
1582 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1583 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1584 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1585 sizeof(key->tp.src));
1586 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1587 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1588 sizeof(key->tp.dst));
1589 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1590 key->basic.ip_proto == IPPROTO_ICMP) {
1591 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1592 &mask->icmp.type,
1593 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1594 sizeof(key->icmp.type));
1595 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1596 &mask->icmp.code,
1597 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1598 sizeof(key->icmp.code));
1599 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1600 key->basic.ip_proto == IPPROTO_ICMPV6) {
1601 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1602 &mask->icmp.type,
1603 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1604 sizeof(key->icmp.type));
1605 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1606 &mask->icmp.code,
1607 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1608 sizeof(key->icmp.code));
1609 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1610 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1611 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1612 if (ret)
1613 return ret;
1614 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1615 key->basic.n_proto == htons(ETH_P_RARP)) {
1616 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1617 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1618 sizeof(key->arp.sip));
1619 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1620 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1621 sizeof(key->arp.tip));
1622 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1623 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1624 sizeof(key->arp.op));
1625 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1626 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1627 sizeof(key->arp.sha));
1628 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1629 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1630 sizeof(key->arp.tha));
1631 }
1632
1633 if (key->basic.ip_proto == IPPROTO_TCP ||
1634 key->basic.ip_proto == IPPROTO_UDP ||
1635 key->basic.ip_proto == IPPROTO_SCTP) {
1636 ret = fl_set_key_port_range(tb, key, mask, extack);
1637 if (ret)
1638 return ret;
1639 }
1640
1641 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1642 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1643 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1644 mask->enc_control.addr_type = ~0;
1645 fl_set_key_val(tb, &key->enc_ipv4.src,
1646 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1647 &mask->enc_ipv4.src,
1648 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1649 sizeof(key->enc_ipv4.src));
1650 fl_set_key_val(tb, &key->enc_ipv4.dst,
1651 TCA_FLOWER_KEY_ENC_IPV4_DST,
1652 &mask->enc_ipv4.dst,
1653 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1654 sizeof(key->enc_ipv4.dst));
1655 }
1656
1657 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1658 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1659 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1660 mask->enc_control.addr_type = ~0;
1661 fl_set_key_val(tb, &key->enc_ipv6.src,
1662 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1663 &mask->enc_ipv6.src,
1664 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1665 sizeof(key->enc_ipv6.src));
1666 fl_set_key_val(tb, &key->enc_ipv6.dst,
1667 TCA_FLOWER_KEY_ENC_IPV6_DST,
1668 &mask->enc_ipv6.dst,
1669 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1670 sizeof(key->enc_ipv6.dst));
1671 }
1672
1673 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1674 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1675 sizeof(key->enc_key_id.keyid));
1676
1677 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1678 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1679 sizeof(key->enc_tp.src));
1680
1681 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1682 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1683 sizeof(key->enc_tp.dst));
1684
1685 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1686
1687 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1688 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1689 sizeof(key->hash.hash));
1690
1691 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1692 ret = fl_set_enc_opt(tb, key, mask, extack);
1693 if (ret)
1694 return ret;
1695 }
1696
1697 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1698 if (ret)
1699 return ret;
1700
1701 if (tb[TCA_FLOWER_KEY_FLAGS])
1702 ret = fl_set_key_flags(tb, &key->control.flags,
1703 &mask->control.flags, extack);
1704
1705 return ret;
1706 }
1707
1708 static void fl_mask_copy(struct fl_flow_mask *dst,
1709 struct fl_flow_mask *src)
1710 {
1711 const void *psrc = fl_key_get_start(&src->key, src);
1712 void *pdst = fl_key_get_start(&dst->key, src);
1713
1714 memcpy(pdst, psrc, fl_mask_range(src));
1715 dst->range = src->range;
1716 }
1717
1718 static const struct rhashtable_params fl_ht_params = {
1719 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1720 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1721 .automatic_shrinking = true,
1722 };
1723
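/* Each mask owns a hash table of the filters sharing it.  The table key is
 * not the whole struct fl_flow_key but only the byte range the mask actually
 * covers: key_offset is advanced by mask->range.start and key_len becomes
 * fl_mask_range(mask), so lookups compare just the relevant slice of mkey.
 * Illustrative example (offsets are not authoritative): a mask that only
 * sets basic.n_proto and ipv4.dst ends up hashing roughly the span from
 * 'basic' through 'ipv4' and ignores every byte outside it.
 */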
1724 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1725 {
1726 mask->filter_ht_params = fl_ht_params;
1727 mask->filter_ht_params.key_len = fl_mask_range(mask);
1728 mask->filter_ht_params.key_offset += mask->range.start;
1729
1730 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1731 }
1732
1733 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1734 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1735
1736 #define FL_KEY_IS_MASKED(mask, member) \
1737 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1738 0, FL_KEY_MEMBER_SIZE(member)) \
1739
1740 #define FL_KEY_SET(keys, cnt, id, member) \
1741 do { \
1742 keys[cnt].key_id = id; \
1743 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1744 cnt++; \
1745 } while(0);
1746
1747 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
1748 do { \
1749 if (FL_KEY_IS_MASKED(mask, member)) \
1750 FL_KEY_SET(keys, cnt, id, member); \
1751 } while(0);
1752
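/* fl_init_dissector() enumerates only the key members whose mask bytes are
 * non-zero.  For illustration, one of the helper macros below expands
 * roughly as:
 *
 *	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 *			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
 *	=> if (FL_KEY_IS_MASKED(mask, ipv4))
 *		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
 *
 * CONTROL and BASIC are always included so addr_type, n_proto and ip_proto
 * can be matched; everything else is opt-in per mask.
 */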
1753 static void fl_init_dissector(struct flow_dissector *dissector,
1754 struct fl_flow_key *mask)
1755 {
1756 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1757 size_t cnt = 0;
1758
1759 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1760 FLOW_DISSECTOR_KEY_META, meta);
1761 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1762 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1763 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1764 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1765 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1766 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1767 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1768 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1769 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1770 FLOW_DISSECTOR_KEY_PORTS, tp);
1771 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1772 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1773 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1774 FLOW_DISSECTOR_KEY_IP, ip);
1775 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1776 FLOW_DISSECTOR_KEY_TCP, tcp);
1777 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1778 FLOW_DISSECTOR_KEY_ICMP, icmp);
1779 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1780 FLOW_DISSECTOR_KEY_ARP, arp);
1781 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1782 FLOW_DISSECTOR_KEY_MPLS, mpls);
1783 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1784 FLOW_DISSECTOR_KEY_VLAN, vlan);
1785 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1786 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1787 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1788 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1789 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1790 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1791 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1792 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1793 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1794 FL_KEY_IS_MASKED(mask, enc_ipv6))
1795 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1796 enc_control);
1797 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1798 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1799 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1800 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1801 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1802 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1803 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1804 FLOW_DISSECTOR_KEY_CT, ct);
1805 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1806 FLOW_DISSECTOR_KEY_HASH, hash);
1807
1808 skb_flow_dissector_init(dissector, keys, cnt);
1809 }
1810
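/* Masks are shared: identical masks are deduplicated through head->ht and
 * reference counted.  fl_create_new_mask() runs only when
 * fl_check_assign_mask() finds no existing entry; it swaps the caller's
 * temporary node for a fully initialized, refcounted mask and links it on
 * head->masks.
 */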
1811 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1812 struct fl_flow_mask *mask)
1813 {
1814 struct fl_flow_mask *newmask;
1815 int err;
1816
1817 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1818 if (!newmask)
1819 return ERR_PTR(-ENOMEM);
1820
1821 fl_mask_copy(newmask, mask);
1822
1823 if ((newmask->key.tp_range.tp_min.dst &&
1824 newmask->key.tp_range.tp_max.dst) ||
1825 (newmask->key.tp_range.tp_min.src &&
1826 newmask->key.tp_range.tp_max.src))
1827 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1828
1829 err = fl_init_mask_hashtable(newmask);
1830 if (err)
1831 goto errout_free;
1832
1833 fl_init_dissector(&newmask->dissector, &newmask->key);
1834
1835 INIT_LIST_HEAD_RCU(&newmask->filters);
1836
1837 refcount_set(&newmask->refcnt, 1);
1838 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1839 &newmask->ht_node, mask_ht_params);
1840 if (err)
1841 goto errout_destroy;
1842
1843 spin_lock(&head->masks_lock);
1844 list_add_tail_rcu(&newmask->list, &head->masks);
1845 spin_unlock(&head->masks_lock);
1846
1847 return newmask;
1848
1849 errout_destroy:
1850 rhashtable_destroy(&newmask->ht);
1851 errout_free:
1852 kfree(newmask);
1853
1854 return ERR_PTR(err);
1855 }
1856
1857 static int fl_check_assign_mask(struct cls_fl_head *head,
1858 struct cls_fl_filter *fnew,
1859 struct cls_fl_filter *fold,
1860 struct fl_flow_mask *mask)
1861 {
1862 struct fl_flow_mask *newmask;
1863 int ret = 0;
1864
1865 rcu_read_lock();
1866
1867 /* Insert mask as temporary node to prevent concurrent creation of mask
1868 * with same key. Any concurrent lookups with same key will return
1869 * -EAGAIN because mask's refcnt is zero.
1870 */
1871 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1872 &mask->ht_node,
1873 mask_ht_params);
1874 if (!fnew->mask) {
1875 rcu_read_unlock();
1876
1877 if (fold) {
1878 ret = -EINVAL;
1879 goto errout_cleanup;
1880 }
1881
1882 newmask = fl_create_new_mask(head, mask);
1883 if (IS_ERR(newmask)) {
1884 ret = PTR_ERR(newmask);
1885 goto errout_cleanup;
1886 }
1887
1888 fnew->mask = newmask;
1889 return 0;
1890 } else if (IS_ERR(fnew->mask)) {
1891 ret = PTR_ERR(fnew->mask);
1892 } else if (fold && fold->mask != fnew->mask) {
1893 ret = -EINVAL;
1894 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1895 /* Mask was deleted concurrently, try again */
1896 ret = -EAGAIN;
1897 }
1898 rcu_read_unlock();
1899 return ret;
1900
1901 errout_cleanup:
1902 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1903 mask_ht_params);
1904 return ret;
1905 }
1906
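/* fl_set_parms() validates the actions, parses the netlink attributes into
 * f->key and mask->key, recomputes the mask range and the masked key in
 * f->mkey, and rejects the filter if it does not fit the chain template.
 */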
1907 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1908 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1909 unsigned long base, struct nlattr **tb,
1910 struct nlattr *est, bool ovr,
1911 struct fl_flow_tmplt *tmplt, bool rtnl_held,
1912 struct netlink_ext_ack *extack)
1913 {
1914 int err;
1915
1916 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1917 extack);
1918 if (err < 0)
1919 return err;
1920
1921 if (tb[TCA_FLOWER_CLASSID]) {
1922 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1923 if (!rtnl_held)
1924 rtnl_lock();
1925 tcf_bind_filter(tp, &f->res, base);
1926 if (!rtnl_held)
1927 rtnl_unlock();
1928 }
1929
1930 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1931 if (err)
1932 return err;
1933
1934 fl_mask_update_range(mask);
1935 fl_set_masked_key(&f->mkey, &f->key, mask);
1936
1937 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1938 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1939 return -EINVAL;
1940 }
1941
1942 return 0;
1943 }
1944
1945 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1946 struct cls_fl_filter *fold,
1947 bool *in_ht)
1948 {
1949 struct fl_flow_mask *mask = fnew->mask;
1950 int err;
1951
1952 err = rhashtable_lookup_insert_fast(&mask->ht,
1953 &fnew->ht_node,
1954 mask->filter_ht_params);
1955 if (err) {
1956 *in_ht = false;
1957 /* It is okay if a filter with the same key already exists
1958 * when overwriting.
1959 */
1960 return fold && err == -EEXIST ? 0 : err;
1961 }
1962
1963 *in_ht = true;
1964 return 0;
1965 }
1966
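/* fl_change() runs without rtnl (TCF_PROTO_OPS_DOIT_UNLOCKED): the new
 * filter is fully built, its mask assigned and the hardware offload issued
 * before tp->lock is taken; only the final commit (handle allocation or
 * replacement of 'fold', idr and list updates) happens under the lock.
 * Concurrent deletion of tp or of the old filter is detected there and
 * reported as -EAGAIN so that cls_api retries the operation.
 */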
1967 static int fl_change(struct net *net, struct sk_buff *in_skb,
1968 struct tcf_proto *tp, unsigned long base,
1969 u32 handle, struct nlattr **tca,
1970 void **arg, bool ovr, bool rtnl_held,
1971 struct netlink_ext_ack *extack)
1972 {
1973 struct cls_fl_head *head = fl_head_dereference(tp);
1974 struct cls_fl_filter *fold = *arg;
1975 struct cls_fl_filter *fnew;
1976 struct fl_flow_mask *mask;
1977 struct nlattr **tb;
1978 bool in_ht;
1979 int err;
1980
1981 if (!tca[TCA_OPTIONS]) {
1982 err = -EINVAL;
1983 goto errout_fold;
1984 }
1985
1986 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1987 if (!mask) {
1988 err = -ENOBUFS;
1989 goto errout_fold;
1990 }
1991
1992 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1993 if (!tb) {
1994 err = -ENOBUFS;
1995 goto errout_mask_alloc;
1996 }
1997
1998 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1999 tca[TCA_OPTIONS], fl_policy, NULL);
2000 if (err < 0)
2001 goto errout_tb;
2002
2003 if (fold && handle && fold->handle != handle) {
2004 err = -EINVAL;
2005 goto errout_tb;
2006 }
2007
2008 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2009 if (!fnew) {
2010 err = -ENOBUFS;
2011 goto errout_tb;
2012 }
2013 INIT_LIST_HEAD(&fnew->hw_list);
2014 refcount_set(&fnew->refcnt, 1);
2015
2016 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2017 if (err < 0)
2018 goto errout;
2019
2020 if (tb[TCA_FLOWER_FLAGS]) {
2021 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2022
2023 if (!tc_flags_valid(fnew->flags)) {
2024 err = -EINVAL;
2025 goto errout;
2026 }
2027 }
2028
2029 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
2030 tp->chain->tmplt_priv, rtnl_held, extack);
2031 if (err)
2032 goto errout;
2033
2034 err = fl_check_assign_mask(head, fnew, fold, mask);
2035 if (err)
2036 goto errout;
2037
2038 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2039 if (err)
2040 goto errout_mask;
2041
2042 if (!tc_skip_hw(fnew->flags)) {
2043 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2044 if (err)
2045 goto errout_ht;
2046 }
2047
2048 if (!tc_in_hw(fnew->flags))
2049 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2050
2051 spin_lock(&tp->lock);
2052
2053 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2054 * proto again or create new one, if necessary.
2055 */
2056 if (tp->deleting) {
2057 err = -EAGAIN;
2058 goto errout_hw;
2059 }
2060
2061 if (fold) {
2062 /* Fold filter was deleted concurrently. Retry lookup. */
2063 if (fold->deleted) {
2064 err = -EAGAIN;
2065 goto errout_hw;
2066 }
2067
2068 fnew->handle = handle;
2069
2070 if (!in_ht) {
2071 struct rhashtable_params params =
2072 fnew->mask->filter_ht_params;
2073
2074 err = rhashtable_insert_fast(&fnew->mask->ht,
2075 &fnew->ht_node,
2076 params);
2077 if (err)
2078 goto errout_hw;
2079 in_ht = true;
2080 }
2081
2082 refcount_inc(&fnew->refcnt);
2083 rhashtable_remove_fast(&fold->mask->ht,
2084 &fold->ht_node,
2085 fold->mask->filter_ht_params);
2086 idr_replace(&head->handle_idr, fnew, fnew->handle);
2087 list_replace_rcu(&fold->list, &fnew->list);
2088 fold->deleted = true;
2089
2090 spin_unlock(&tp->lock);
2091
2092 fl_mask_put(head, fold->mask);
2093 if (!tc_skip_hw(fold->flags))
2094 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2095 tcf_unbind_filter(tp, &fold->res);
2096 /* Caller holds reference to fold, so refcnt is always > 0
2097 * after this.
2098 */
2099 refcount_dec(&fold->refcnt);
2100 __fl_put(fold);
2101 } else {
2102 if (handle) {
2103 /* user specifies a handle and it doesn't exist */
2104 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2105 handle, GFP_ATOMIC);
2106
2107 /* Filter with specified handle was concurrently
2108 * inserted after initial check in cls_api. This is not
2109 * necessarily an error if NLM_F_EXCL is not set in
2110 * message flags. Returning EAGAIN will cause cls_api to
2111 * try to update concurrently inserted rule.
2112 */
2113 if (err == -ENOSPC)
2114 err = -EAGAIN;
2115 } else {
2116 handle = 1;
2117 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2118 INT_MAX, GFP_ATOMIC);
2119 }
2120 if (err)
2121 goto errout_hw;
2122
2123 refcount_inc(&fnew->refcnt);
2124 fnew->handle = handle;
2125 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2126 spin_unlock(&tp->lock);
2127 }
2128
2129 *arg = fnew;
2130
2131 kfree(tb);
2132 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2133 return 0;
2134
2135 errout_ht:
2136 spin_lock(&tp->lock);
2137 errout_hw:
2138 fnew->deleted = true;
2139 spin_unlock(&tp->lock);
2140 if (!tc_skip_hw(fnew->flags))
2141 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2142 if (in_ht)
2143 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2144 fnew->mask->filter_ht_params);
2145 errout_mask:
2146 fl_mask_put(head, fnew->mask);
2147 errout:
2148 __fl_put(fnew);
2149 errout_tb:
2150 kfree(tb);
2151 errout_mask_alloc:
2152 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2153 errout_fold:
2154 if (fold)
2155 __fl_put(fold);
2156 return err;
2157 }
2158
2159 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2160 bool rtnl_held, struct netlink_ext_ack *extack)
2161 {
2162 struct cls_fl_head *head = fl_head_dereference(tp);
2163 struct cls_fl_filter *f = arg;
2164 bool last_on_mask;
2165 int err = 0;
2166
2167 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2168 *last = list_empty(&head->masks);
2169 __fl_put(f);
2170
2171 return err;
2172 }
2173
2174 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2175 bool rtnl_held)
2176 {
2177 struct cls_fl_head *head = fl_head_dereference(tp);
2178 unsigned long id = arg->cookie, tmp;
2179 struct cls_fl_filter *f;
2180
2181 arg->count = arg->skip;
2182
2183 rcu_read_lock();
2184 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2185 /* don't return filters that are being deleted */
2186 if (!refcount_inc_not_zero(&f->refcnt))
2187 continue;
2188 rcu_read_unlock();
2189
2190 if (arg->fn(tp, f, arg) < 0) {
2191 __fl_put(f);
2192 arg->stop = 1;
2193 rcu_read_lock();
2194 break;
2195 }
2196 __fl_put(f);
2197 arg->count++;
2198 rcu_read_lock();
2199 }
2200 rcu_read_unlock();
2201 arg->cookie = id;
2202 }
2203
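/* Return the next offloaded filter after 'f' (or the first one when 'f' is
 * NULL) with a live refcount, under tp->lock.  The list head itself is
 * wrapped with list_entry() so that list_for_each_entry_continue() starts
 * at the first real entry; filters already marked deleted are skipped only
 * when re-adding rules (add == true), a destroy pass still visits them.
 */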
2204 static struct cls_fl_filter *
2205 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2206 {
2207 struct cls_fl_head *head = fl_head_dereference(tp);
2208
2209 spin_lock(&tp->lock);
2210 if (list_empty(&head->hw_filters)) {
2211 spin_unlock(&tp->lock);
2212 return NULL;
2213 }
2214
2215 if (!f)
2216 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2217 hw_list);
2218 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2219 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2220 spin_unlock(&tp->lock);
2221 return f;
2222 }
2223 }
2224
2225 spin_unlock(&tp->lock);
2226 return NULL;
2227 }
2228
2229 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2230 void *cb_priv, struct netlink_ext_ack *extack)
2231 {
2232 struct tcf_block *block = tp->chain->block;
2233 struct flow_cls_offload cls_flower = {};
2234 struct cls_fl_filter *f = NULL;
2235 int err;
2236
2237 /* hw_filters list can only be changed by hw offload functions after
2238 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2239 * iterating it.
2240 */
2241 ASSERT_RTNL();
2242
2243 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2244 cls_flower.rule =
2245 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2246 if (!cls_flower.rule) {
2247 __fl_put(f);
2248 return -ENOMEM;
2249 }
2250
2251 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2252 extack);
2253 cls_flower.command = add ?
2254 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2255 cls_flower.cookie = (unsigned long)f;
2256 cls_flower.rule->match.dissector = &f->mask->dissector;
2257 cls_flower.rule->match.mask = &f->mask->key;
2258 cls_flower.rule->match.key = &f->mkey;
2259
2260 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2261 if (err) {
2262 kfree(cls_flower.rule);
2263 if (tc_skip_sw(f->flags)) {
2264 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2265 __fl_put(f);
2266 return err;
2267 }
2268 goto next_flow;
2269 }
2270
2271 cls_flower.classid = f->res.classid;
2272
2273 err = tc_setup_cb_reoffload(block, tp, add, cb,
2274 TC_SETUP_CLSFLOWER, &cls_flower,
2275 cb_priv, &f->flags,
2276 &f->in_hw_count);
2277 tc_cleanup_flow_action(&cls_flower.rule->action);
2278 kfree(cls_flower.rule);
2279
2280 if (err) {
2281 __fl_put(f);
2282 return err;
2283 }
2284 next_flow:
2285 __fl_put(f);
2286 }
2287
2288 return 0;
2289 }
2290
2291 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2292 {
2293 struct flow_cls_offload *cls_flower = type_data;
2294 struct cls_fl_filter *f =
2295 (struct cls_fl_filter *) cls_flower->cookie;
2296 struct cls_fl_head *head = fl_head_dereference(tp);
2297
2298 spin_lock(&tp->lock);
2299 list_add(&f->hw_list, &head->hw_filters);
2300 spin_unlock(&tp->lock);
2301 }
2302
2303 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2304 {
2305 struct flow_cls_offload *cls_flower = type_data;
2306 struct cls_fl_filter *f =
2307 (struct cls_fl_filter *) cls_flower->cookie;
2308
2309 spin_lock(&tp->lock);
2310 if (!list_empty(&f->hw_list))
2311 list_del_init(&f->hw_list);
2312 spin_unlock(&tp->lock);
2313 }
2314
2315 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2316 struct fl_flow_tmplt *tmplt)
2317 {
2318 struct flow_cls_offload cls_flower = {};
2319 struct tcf_block *block = chain->block;
2320
2321 cls_flower.rule = flow_rule_alloc(0);
2322 if (!cls_flower.rule)
2323 return -ENOMEM;
2324
2325 cls_flower.common.chain_index = chain->index;
2326 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2327 cls_flower.cookie = (unsigned long) tmplt;
2328 cls_flower.rule->match.dissector = &tmplt->dissector;
2329 cls_flower.rule->match.mask = &tmplt->mask;
2330 cls_flower.rule->match.key = &tmplt->dummy_key;
2331
2332 /* We don't care if any of the drivers fails to handle this
2333 * call. It only serves as a hint for them.
2334 */
2335 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2336 kfree(cls_flower.rule);
2337
2338 return 0;
2339 }
2340
2341 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2342 struct fl_flow_tmplt *tmplt)
2343 {
2344 struct flow_cls_offload cls_flower = {};
2345 struct tcf_block *block = chain->block;
2346
2347 cls_flower.common.chain_index = chain->index;
2348 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2349 cls_flower.cookie = (unsigned long) tmplt;
2350
2351 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2352 }
2353
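/* Chain templates pre-declare the mask that filters on a chain will use so
 * that drivers can prepare for it.  They are parsed with the same
 * fl_set_key() path as regular filters and advertised to hardware as a
 * FLOW_CLS_TMPLT_CREATE hint.
 */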
2354 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2355 struct nlattr **tca,
2356 struct netlink_ext_ack *extack)
2357 {
2358 struct fl_flow_tmplt *tmplt;
2359 struct nlattr **tb;
2360 int err;
2361
2362 if (!tca[TCA_OPTIONS])
2363 return ERR_PTR(-EINVAL);
2364
2365 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2366 if (!tb)
2367 return ERR_PTR(-ENOBUFS);
2368 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2369 tca[TCA_OPTIONS], fl_policy, NULL);
2370 if (err)
2371 goto errout_tb;
2372
2373 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2374 if (!tmplt) {
2375 err = -ENOMEM;
2376 goto errout_tb;
2377 }
2378 tmplt->chain = chain;
2379 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2380 if (err)
2381 goto errout_tmplt;
2382
2383 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2384
2385 err = fl_hw_create_tmplt(chain, tmplt);
2386 if (err)
2387 goto errout_tmplt;
2388
2389 kfree(tb);
2390 return tmplt;
2391
2392 errout_tmplt:
2393 kfree(tmplt);
2394 errout_tb:
2395 kfree(tb);
2396 return ERR_PTR(err);
2397 }
2398
2399 static void fl_tmplt_destroy(void *tmplt_priv)
2400 {
2401 struct fl_flow_tmplt *tmplt = tmplt_priv;
2402
2403 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2404 kfree(tmplt);
2405 }
2406
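/* Dump helper convention: a field whose mask is all zeroes is not emitted
 * at all, and passing TCA_FLOWER_UNSPEC as mask_type emits the value
 * without a separate mask attribute (used for exact-match-only fields such
 * as TCA_FLOWER_KEY_ETH_TYPE).
 */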
2407 static int fl_dump_key_val(struct sk_buff *skb,
2408 void *val, int val_type,
2409 void *mask, int mask_type, int len)
2410 {
2411 int err;
2412
2413 if (!memchr_inv(mask, 0, len))
2414 return 0;
2415 err = nla_put(skb, val_type, len, val);
2416 if (err)
2417 return err;
2418 if (mask_type != TCA_FLOWER_UNSPEC) {
2419 err = nla_put(skb, mask_type, len, mask);
2420 if (err)
2421 return err;
2422 }
2423 return 0;
2424 }
2425
2426 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2427 struct fl_flow_key *mask)
2428 {
2429 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2430 TCA_FLOWER_KEY_PORT_DST_MIN,
2431 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2432 sizeof(key->tp_range.tp_min.dst)) ||
2433 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2434 TCA_FLOWER_KEY_PORT_DST_MAX,
2435 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2436 sizeof(key->tp_range.tp_max.dst)) ||
2437 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2438 TCA_FLOWER_KEY_PORT_SRC_MIN,
2439 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2440 sizeof(key->tp_range.tp_min.src)) ||
2441 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2442 TCA_FLOWER_KEY_PORT_SRC_MAX,
2443 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2444 sizeof(key->tp_range.tp_max.src)))
2445 return -1;
2446
2447 return 0;
2448 }
2449
2450 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2451 struct flow_dissector_key_mpls *mpls_key,
2452 struct flow_dissector_key_mpls *mpls_mask,
2453 u8 lse_index)
2454 {
2455 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2456 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2457 int err;
2458
2459 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2460 lse_index + 1);
2461 if (err)
2462 return err;
2463
2464 if (lse_mask->mpls_ttl) {
2465 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2466 lse_key->mpls_ttl);
2467 if (err)
2468 return err;
2469 }
2470 if (lse_mask->mpls_bos) {
2471 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2472 lse_key->mpls_bos);
2473 if (err)
2474 return err;
2475 }
2476 if (lse_mask->mpls_tc) {
2477 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2478 lse_key->mpls_tc);
2479 if (err)
2480 return err;
2481 }
2482 if (lse_mask->mpls_label) {
2483 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2484 lse_key->mpls_label);
2485 if (err)
2486 return err;
2487 }
2488
2489 return 0;
2490 }
2491
2492 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2493 struct flow_dissector_key_mpls *mpls_key,
2494 struct flow_dissector_key_mpls *mpls_mask)
2495 {
2496 struct nlattr *opts;
2497 struct nlattr *lse;
2498 u8 lse_index;
2499 int err;
2500
2501 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2502 if (!opts)
2503 return -EMSGSIZE;
2504
2505 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2506 if (!(mpls_mask->used_lses & 1 << lse_index))
2507 continue;
2508
2509 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2510 if (!lse) {
2511 err = -EMSGSIZE;
2512 goto err_opts;
2513 }
2514
2515 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2516 lse_index);
2517 if (err)
2518 goto err_opts_lse;
2519 nla_nest_end(skb, lse);
2520 }
2521 nla_nest_end(skb, opts);
2522
2523 return 0;
2524
2525 err_opts_lse:
2526 nla_nest_cancel(skb, lse);
2527 err_opts:
2528 nla_nest_cancel(skb, opts);
2529
2530 return err;
2531 }
2532
2533 static int fl_dump_key_mpls(struct sk_buff *skb,
2534 struct flow_dissector_key_mpls *mpls_key,
2535 struct flow_dissector_key_mpls *mpls_mask)
2536 {
2537 struct flow_dissector_mpls_lse *lse_mask;
2538 struct flow_dissector_mpls_lse *lse_key;
2539 int err;
2540
2541 if (!mpls_mask->used_lses)
2542 return 0;
2543
2544 lse_mask = &mpls_mask->ls[0];
2545 lse_key = &mpls_key->ls[0];
2546
2547 /* For backward compatibility, don't use the MPLS nested attributes if
2548 * the rule can be expressed using the old attributes.
2549 */
2550 if (mpls_mask->used_lses & ~1 ||
2551 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2552 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2553 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2554
2555 if (lse_mask->mpls_ttl) {
2556 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2557 lse_key->mpls_ttl);
2558 if (err)
2559 return err;
2560 }
2561 if (lse_mask->mpls_tc) {
2562 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2563 lse_key->mpls_tc);
2564 if (err)
2565 return err;
2566 }
2567 if (lse_mask->mpls_label) {
2568 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2569 lse_key->mpls_label);
2570 if (err)
2571 return err;
2572 }
2573 if (lse_mask->mpls_bos) {
2574 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2575 lse_key->mpls_bos);
2576 if (err)
2577 return err;
2578 }
2579 return 0;
2580 }
2581
2582 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2583 struct flow_dissector_key_ip *key,
2584 struct flow_dissector_key_ip *mask)
2585 {
2586 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2587 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2588 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2589 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2590
2591 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2592 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2593 return -1;
2594
2595 return 0;
2596 }
2597
2598 static int fl_dump_key_vlan(struct sk_buff *skb,
2599 int vlan_id_key, int vlan_prio_key,
2600 struct flow_dissector_key_vlan *vlan_key,
2601 struct flow_dissector_key_vlan *vlan_mask)
2602 {
2603 int err;
2604
2605 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2606 return 0;
2607 if (vlan_mask->vlan_id) {
2608 err = nla_put_u16(skb, vlan_id_key,
2609 vlan_key->vlan_id);
2610 if (err)
2611 return err;
2612 }
2613 if (vlan_mask->vlan_priority) {
2614 err = nla_put_u8(skb, vlan_prio_key,
2615 vlan_key->vlan_priority);
2616 if (err)
2617 return err;
2618 }
2619 return 0;
2620 }
2621
2622 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2623 u32 *flower_key, u32 *flower_mask,
2624 u32 flower_flag_bit, u32 dissector_flag_bit)
2625 {
2626 if (dissector_mask & dissector_flag_bit) {
2627 *flower_mask |= flower_flag_bit;
2628 if (dissector_key & dissector_flag_bit)
2629 *flower_key |= flower_flag_bit;
2630 }
2631 }
2632
2633 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2634 {
2635 u32 key, mask;
2636 __be32 _key, _mask;
2637 int err;
2638
2639 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2640 return 0;
2641
2642 key = 0;
2643 mask = 0;
2644
2645 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2646 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2647 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2648 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2649 FLOW_DIS_FIRST_FRAG);
2650
2651 _key = cpu_to_be32(key);
2652 _mask = cpu_to_be32(mask);
2653
2654 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2655 if (err)
2656 return err;
2657
2658 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2659 }
2660
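/* Geneve options are stored back to back in enc_opts->data as variable
 * length 'struct geneve_opt' records; opt_off walks them using the 4-byte
 * units encoded in opt->length.
 */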
2661 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2662 struct flow_dissector_key_enc_opts *enc_opts)
2663 {
2664 struct geneve_opt *opt;
2665 struct nlattr *nest;
2666 int opt_off = 0;
2667
2668 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2669 if (!nest)
2670 goto nla_put_failure;
2671
2672 while (enc_opts->len > opt_off) {
2673 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2674
2675 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2676 opt->opt_class))
2677 goto nla_put_failure;
2678 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2679 opt->type))
2680 goto nla_put_failure;
2681 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2682 opt->length * 4, opt->opt_data))
2683 goto nla_put_failure;
2684
2685 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2686 }
2687 nla_nest_end(skb, nest);
2688 return 0;
2689
2690 nla_put_failure:
2691 nla_nest_cancel(skb, nest);
2692 return -EMSGSIZE;
2693 }
2694
2695 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2696 struct flow_dissector_key_enc_opts *enc_opts)
2697 {
2698 struct vxlan_metadata *md;
2699 struct nlattr *nest;
2700
2701 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2702 if (!nest)
2703 goto nla_put_failure;
2704
2705 md = (struct vxlan_metadata *)&enc_opts->data[0];
2706 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2707 goto nla_put_failure;
2708
2709 nla_nest_end(skb, nest);
2710 return 0;
2711
2712 nla_put_failure:
2713 nla_nest_cancel(skb, nest);
2714 return -EMSGSIZE;
2715 }
2716
2717 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2718 struct flow_dissector_key_enc_opts *enc_opts)
2719 {
2720 struct erspan_metadata *md;
2721 struct nlattr *nest;
2722
2723 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2724 if (!nest)
2725 goto nla_put_failure;
2726
2727 md = (struct erspan_metadata *)&enc_opts->data[0];
2728 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2729 goto nla_put_failure;
2730
2731 if (md->version == 1 &&
2732 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2733 goto nla_put_failure;
2734
2735 if (md->version == 2 &&
2736 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2737 md->u.md2.dir) ||
2738 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2739 get_hwid(&md->u.md2))))
2740 goto nla_put_failure;
2741
2742 nla_nest_end(skb, nest);
2743 return 0;
2744
2745 nla_put_failure:
2746 nla_nest_cancel(skb, nest);
2747 return -EMSGSIZE;
2748 }
2749
2750 static int fl_dump_key_ct(struct sk_buff *skb,
2751 struct flow_dissector_key_ct *key,
2752 struct flow_dissector_key_ct *mask)
2753 {
2754 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2755 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2756 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2757 sizeof(key->ct_state)))
2758 goto nla_put_failure;
2759
2760 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2761 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2762 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2763 sizeof(key->ct_zone)))
2764 goto nla_put_failure;
2765
2766 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2767 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2768 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2769 sizeof(key->ct_mark)))
2770 goto nla_put_failure;
2771
2772 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2773 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2774 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2775 sizeof(key->ct_labels)))
2776 goto nla_put_failure;
2777
2778 return 0;
2779
2780 nla_put_failure:
2781 return -EMSGSIZE;
2782 }
2783
2784 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2785 struct flow_dissector_key_enc_opts *enc_opts)
2786 {
2787 struct nlattr *nest;
2788 int err;
2789
2790 if (!enc_opts->len)
2791 return 0;
2792
2793 nest = nla_nest_start_noflag(skb, enc_opt_type);
2794 if (!nest)
2795 goto nla_put_failure;
2796
2797 switch (enc_opts->dst_opt_type) {
2798 case TUNNEL_GENEVE_OPT:
2799 err = fl_dump_key_geneve_opt(skb, enc_opts);
2800 if (err)
2801 goto nla_put_failure;
2802 break;
2803 case TUNNEL_VXLAN_OPT:
2804 err = fl_dump_key_vxlan_opt(skb, enc_opts);
2805 if (err)
2806 goto nla_put_failure;
2807 break;
2808 case TUNNEL_ERSPAN_OPT:
2809 err = fl_dump_key_erspan_opt(skb, enc_opts);
2810 if (err)
2811 goto nla_put_failure;
2812 break;
2813 default:
2814 goto nla_put_failure;
2815 }
2816 nla_nest_end(skb, nest);
2817 return 0;
2818
2819 nla_put_failure:
2820 nla_nest_cancel(skb, nest);
2821 return -EMSGSIZE;
2822 }
2823
2824 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2825 struct flow_dissector_key_enc_opts *key_opts,
2826 struct flow_dissector_key_enc_opts *msk_opts)
2827 {
2828 int err;
2829
2830 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2831 if (err)
2832 return err;
2833
2834 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2835 }
2836
2837 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2838 struct fl_flow_key *key, struct fl_flow_key *mask)
2839 {
2840 if (mask->meta.ingress_ifindex) {
2841 struct net_device *dev;
2842
2843 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2844 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2845 goto nla_put_failure;
2846 }
2847
2848 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2849 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2850 sizeof(key->eth.dst)) ||
2851 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2852 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2853 sizeof(key->eth.src)) ||
2854 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2855 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2856 sizeof(key->basic.n_proto)))
2857 goto nla_put_failure;
2858
2859 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2860 goto nla_put_failure;
2861
2862 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2863 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2864 goto nla_put_failure;
2865
2866 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2867 TCA_FLOWER_KEY_CVLAN_PRIO,
2868 &key->cvlan, &mask->cvlan) ||
2869 (mask->cvlan.vlan_tpid &&
2870 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2871 key->cvlan.vlan_tpid)))
2872 goto nla_put_failure;
2873
2874 if (mask->basic.n_proto) {
2875 if (mask->cvlan.vlan_eth_type) {
2876 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2877 key->basic.n_proto))
2878 goto nla_put_failure;
2879 } else if (mask->vlan.vlan_eth_type) {
2880 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2881 key->vlan.vlan_eth_type))
2882 goto nla_put_failure;
2883 }
2884 }
2885
2886 if ((key->basic.n_proto == htons(ETH_P_IP) ||
2887 key->basic.n_proto == htons(ETH_P_IPV6)) &&
2888 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2889 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2890 sizeof(key->basic.ip_proto)) ||
2891 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2892 goto nla_put_failure;
2893
2894 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2895 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2896 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2897 sizeof(key->ipv4.src)) ||
2898 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2899 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2900 sizeof(key->ipv4.dst))))
2901 goto nla_put_failure;
2902 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2903 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2904 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2905 sizeof(key->ipv6.src)) ||
2906 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2907 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2908 sizeof(key->ipv6.dst))))
2909 goto nla_put_failure;
2910
2911 if (key->basic.ip_proto == IPPROTO_TCP &&
2912 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2913 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2914 sizeof(key->tp.src)) ||
2915 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2916 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2917 sizeof(key->tp.dst)) ||
2918 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2919 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2920 sizeof(key->tcp.flags))))
2921 goto nla_put_failure;
2922 else if (key->basic.ip_proto == IPPROTO_UDP &&
2923 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2924 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2925 sizeof(key->tp.src)) ||
2926 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2927 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2928 sizeof(key->tp.dst))))
2929 goto nla_put_failure;
2930 else if (key->basic.ip_proto == IPPROTO_SCTP &&
2931 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2932 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2933 sizeof(key->tp.src)) ||
2934 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2935 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2936 sizeof(key->tp.dst))))
2937 goto nla_put_failure;
2938 else if (key->basic.n_proto == htons(ETH_P_IP) &&
2939 key->basic.ip_proto == IPPROTO_ICMP &&
2940 (fl_dump_key_val(skb, &key->icmp.type,
2941 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2942 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2943 sizeof(key->icmp.type)) ||
2944 fl_dump_key_val(skb, &key->icmp.code,
2945 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2946 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2947 sizeof(key->icmp.code))))
2948 goto nla_put_failure;
2949 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2950 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2951 (fl_dump_key_val(skb, &key->icmp.type,
2952 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2953 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2954 sizeof(key->icmp.type)) ||
2955 fl_dump_key_val(skb, &key->icmp.code,
2956 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2957 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2958 sizeof(key->icmp.code))))
2959 goto nla_put_failure;
2960 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2961 key->basic.n_proto == htons(ETH_P_RARP)) &&
2962 (fl_dump_key_val(skb, &key->arp.sip,
2963 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2964 TCA_FLOWER_KEY_ARP_SIP_MASK,
2965 sizeof(key->arp.sip)) ||
2966 fl_dump_key_val(skb, &key->arp.tip,
2967 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2968 TCA_FLOWER_KEY_ARP_TIP_MASK,
2969 sizeof(key->arp.tip)) ||
2970 fl_dump_key_val(skb, &key->arp.op,
2971 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2972 TCA_FLOWER_KEY_ARP_OP_MASK,
2973 sizeof(key->arp.op)) ||
2974 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2975 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2976 sizeof(key->arp.sha)) ||
2977 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2978 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2979 sizeof(key->arp.tha))))
2980 goto nla_put_failure;
2981
2982 if ((key->basic.ip_proto == IPPROTO_TCP ||
2983 key->basic.ip_proto == IPPROTO_UDP ||
2984 key->basic.ip_proto == IPPROTO_SCTP) &&
2985 fl_dump_key_port_range(skb, key, mask))
2986 goto nla_put_failure;
2987
2988 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2989 (fl_dump_key_val(skb, &key->enc_ipv4.src,
2990 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2991 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2992 sizeof(key->enc_ipv4.src)) ||
2993 fl_dump_key_val(skb, &key->enc_ipv4.dst,
2994 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2995 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2996 sizeof(key->enc_ipv4.dst))))
2997 goto nla_put_failure;
2998 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2999 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3000 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3001 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3002 sizeof(key->enc_ipv6.src)) ||
3003 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3004 TCA_FLOWER_KEY_ENC_IPV6_DST,
3005 &mask->enc_ipv6.dst,
3006 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3007 sizeof(key->enc_ipv6.dst))))
3008 goto nla_put_failure;
3009
3010 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3011 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3012 sizeof(key->enc_key_id)) ||
3013 fl_dump_key_val(skb, &key->enc_tp.src,
3014 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3015 &mask->enc_tp.src,
3016 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3017 sizeof(key->enc_tp.src)) ||
3018 fl_dump_key_val(skb, &key->enc_tp.dst,
3019 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3020 &mask->enc_tp.dst,
3021 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3022 sizeof(key->enc_tp.dst)) ||
3023 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3024 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3025 goto nla_put_failure;
3026
3027 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3028 goto nla_put_failure;
3029
3030 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3031 goto nla_put_failure;
3032
3033 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3034 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3035 sizeof(key->hash.hash)))
3036 goto nla_put_failure;
3037
3038 return 0;
3039
3040 nla_put_failure:
3041 return -EMSGSIZE;
3042 }
3043
3044 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3045 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3046 {
3047 struct cls_fl_filter *f = fh;
3048 struct nlattr *nest;
3049 struct fl_flow_key *key, *mask;
3050 bool skip_hw;
3051
3052 if (!f)
3053 return skb->len;
3054
3055 t->tcm_handle = f->handle;
3056
3057 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3058 if (!nest)
3059 goto nla_put_failure;
3060
3061 spin_lock(&tp->lock);
3062
3063 if (f->res.classid &&
3064 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3065 goto nla_put_failure_locked;
3066
3067 key = &f->key;
3068 mask = &f->mask->key;
3069 skip_hw = tc_skip_hw(f->flags);
3070
3071 if (fl_dump_key(skb, net, key, mask))
3072 goto nla_put_failure_locked;
3073
3074 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3075 goto nla_put_failure_locked;
3076
3077 spin_unlock(&tp->lock);
3078
3079 if (!skip_hw)
3080 fl_hw_update_stats(tp, f, rtnl_held);
3081
3082 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3083 goto nla_put_failure;
3084
3085 if (tcf_exts_dump(skb, &f->exts))
3086 goto nla_put_failure;
3087
3088 nla_nest_end(skb, nest);
3089
3090 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3091 goto nla_put_failure;
3092
3093 return skb->len;
3094
3095 nla_put_failure_locked:
3096 spin_unlock(&tp->lock);
3097 nla_put_failure:
3098 nla_nest_cancel(skb, nest);
3099 return -1;
3100 }
3101
3102 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3103 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3104 {
3105 struct cls_fl_filter *f = fh;
3106 struct nlattr *nest;
3107 bool skip_hw;
3108
3109 if (!f)
3110 return skb->len;
3111
3112 t->tcm_handle = f->handle;
3113
3114 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3115 if (!nest)
3116 goto nla_put_failure;
3117
3118 spin_lock(&tp->lock);
3119
3120 skip_hw = tc_skip_hw(f->flags);
3121
3122 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3123 goto nla_put_failure_locked;
3124
3125 spin_unlock(&tp->lock);
3126
3127 if (!skip_hw)
3128 fl_hw_update_stats(tp, f, rtnl_held);
3129
3130 if (tcf_exts_terse_dump(skb, &f->exts))
3131 goto nla_put_failure;
3132
3133 nla_nest_end(skb, nest);
3134
3135 return skb->len;
3136
3137 nla_put_failure_locked:
3138 spin_unlock(&tp->lock);
3139 nla_put_failure:
3140 nla_nest_cancel(skb, nest);
3141 return -1;
3142 }
3143
3144 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3145 {
3146 struct fl_flow_tmplt *tmplt = tmplt_priv;
3147 struct fl_flow_key *key, *mask;
3148 struct nlattr *nest;
3149
3150 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3151 if (!nest)
3152 goto nla_put_failure;
3153
3154 key = &tmplt->dummy_key;
3155 mask = &tmplt->mask;
3156
3157 if (fl_dump_key(skb, net, key, mask))
3158 goto nla_put_failure;
3159
3160 nla_nest_end(skb, nest);
3161
3162 return skb->len;
3163
3164 nla_put_failure:
3165 nla_nest_cancel(skb, nest);
3166 return -EMSGSIZE;
3167 }
3168
3169 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3170 unsigned long base)
3171 {
3172 struct cls_fl_filter *f = fh;
3173
3174 if (f && f->res.classid == classid) {
3175 if (cl)
3176 __tcf_bind_filter(q, &f->res, base);
3177 else
3178 __tcf_unbind_filter(q, &f->res);
3179 }
3180 }
3181
3182 static bool fl_delete_empty(struct tcf_proto *tp)
3183 {
3184 struct cls_fl_head *head = fl_head_dereference(tp);
3185
3186 spin_lock(&tp->lock);
3187 tp->deleting = idr_is_empty(&head->handle_idr);
3188 spin_unlock(&tp->lock);
3189
3190 return tp->deleting;
3191 }
3192
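/* ->delete_empty lets cls_api destroy this tcf_proto as soon as its handle
 * IDR is empty; setting tp->deleting under tp->lock synchronizes with a
 * concurrent fl_change() committing a new filter.
 */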
3193 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3194 .kind = "flower",
3195 .classify = fl_classify,
3196 .init = fl_init,
3197 .destroy = fl_destroy,
3198 .get = fl_get,
3199 .put = fl_put,
3200 .change = fl_change,
3201 .delete = fl_delete,
3202 .delete_empty = fl_delete_empty,
3203 .walk = fl_walk,
3204 .reoffload = fl_reoffload,
3205 .hw_add = fl_hw_add,
3206 .hw_del = fl_hw_del,
3207 .dump = fl_dump,
3208 .terse_dump = fl_terse_dump,
3209 .bind_class = fl_bind_class,
3210 .tmplt_create = fl_tmplt_create,
3211 .tmplt_destroy = fl_tmplt_destroy,
3212 .tmplt_dump = fl_tmplt_dump,
3213 .owner = THIS_MODULE,
3214 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
3215 };
3216
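/* Illustrative iproute2 usage exercising this classifier (example values,
 * not part of the kernel API):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 skip_hw action drop
 *
 * The flower options map onto the TCA_FLOWER_* attributes parsed above.
 */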
3217 static int __init cls_fl_init(void)
3218 {
3219 return register_tcf_proto_ops(&cls_fl_ops);
3220 }
3221
3222 static void __exit cls_fl_exit(void)
3223 {
3224 unregister_tcf_proto_ops(&cls_fl_ops);
3225 }
3226
3227 module_init(cls_fl_init);
3228 module_exit(cls_fl_exit);
3229
3230 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3231 MODULE_DESCRIPTION("Flower classifier");
3232 MODULE_LICENSE("GPL v2");
3233