// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
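
/* Worked example (hypothetical values: if the highest uAPI CT flag were
 * 1 << 4, then __TCA_FLOWER_KEY_CT_FLAGS_MAX == 17):
 *   TCA_FLOWER_KEY_CT_FLAGS_MAX  = (17 - 1) << 1 = 32
 *   TCA_FLOWER_KEY_CT_FLAGS_MASK = 32 - 1        = 0x1f
 * i.e. MASK is an all-ones bitmask covering every defined CT state flag,
 * which is what the NLA_POLICY_MASK() entries in fl_policy validate
 * against.
 */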

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
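
/* Example (a sketch, on a 64-bit build where sizeof(long) == 8): if the
 * first non-zero mask byte sits at offset 13 and the last at offset 22,
 * then range.start = rounddown(13, 8) = 8 and range.end = roundup(23, 8)
 * = 24, so only bytes [8, 24) of the key are hashed and compared, in
 * whole-long units.
 */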

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
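
/* Within the masked range the lookup key is simply mkey = key & mask,
 * one long at a time. E.g. a /24 IPv4 dst mask of 0xffffff00 turns a
 * dissected address of 192.0.2.55 into 192.0.2.0 before the hash-table
 * lookup.
 */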

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}
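
/* The check rejects a filter mask that matches on any bit the template
 * mask does not cover: ~tmplt & mask is non-zero exactly when mask has a
 * bit set that the template leaves clear. E.g. tmplt bits 0b1100 with
 * mask bits 0b1010 fails on bit 1.
 */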

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}
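
/* Lookup is thus two-stage for port-range masks: the range comparison
 * narrows candidates and stamps the candidate filter's own min/max into
 * mkey (the skb carries a single port, not a range), after which the
 * exact rhashtable lookup confirms the rest of the masked key.
 */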

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
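
/* Classification cost scales with the number of distinct masks: each
 * mask gets its own dissection pass and one hash lookup, and the first
 * software-visible match terminates the walk. Filters marked
 * hardware-only (tc_skip_sw()) are deliberately skipped here.
 */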

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.drops,
			      cls_flower.stats.lastused,
			      cls_flower.stats.used_hw_stats,
			      cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
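
/* Parsing convention used throughout fl_set_key(): a value attribute with
 * no accompanying mask attribute (or no mask attribute defined for it,
 * TCA_FLOWER_UNSPEC) means an exact match, so the mask defaults to
 * all-ones for the field's full length.
 */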

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max destination ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max source ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
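
/* Summary of the range rules enforced above: min and max must be given
 * together per direction, and min must be strictly smaller than max.
 * E.g. a dst range of 80-90 is accepted, while 80-80 (max <= min) is
 * rejected.
 */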

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}
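
/* Example: with TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT set in both flower_key
 * and flower_mask, the dissector pair gains FLOW_DIS_IS_FRAGMENT in both
 * key and mask; a bit absent from flower_mask leaves both untouched,
 * i.e. it stays a don't-care.
 */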

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
		return -ERANGE;

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}
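
/* E.g. a masked state of +trk+new is accepted, +new without +trk is
 * rejected (no other flag may be set unless the connection is tracked),
 * and +trk+new+est is rejected because the two states are mutually
 * exclusive.
 */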

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO,
					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					&key->vlan, &mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1598 			       sizeof(key->tp.dst));
1599 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1600 		   key->basic.ip_proto == IPPROTO_ICMP) {
1601 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1602 			       &mask->icmp.type,
1603 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1604 			       sizeof(key->icmp.type));
1605 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1606 			       &mask->icmp.code,
1607 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1608 			       sizeof(key->icmp.code));
1609 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1610 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1611 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1612 			       &mask->icmp.type,
1613 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1614 			       sizeof(key->icmp.type));
1615 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1616 			       &mask->icmp.code,
1617 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1618 			       sizeof(key->icmp.code));
1619 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1620 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1621 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1622 		if (ret)
1623 			return ret;
1624 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1625 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1626 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1627 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1628 			       sizeof(key->arp.sip));
1629 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1630 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1631 			       sizeof(key->arp.tip));
1632 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1633 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1634 			       sizeof(key->arp.op));
1635 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1636 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1637 			       sizeof(key->arp.sha));
1638 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1639 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1640 			       sizeof(key->arp.tha));
1641 	}
1642 
1643 	if (key->basic.ip_proto == IPPROTO_TCP ||
1644 	    key->basic.ip_proto == IPPROTO_UDP ||
1645 	    key->basic.ip_proto == IPPROTO_SCTP) {
1646 		ret = fl_set_key_port_range(tb, key, mask, extack);
1647 		if (ret)
1648 			return ret;
1649 	}
1650 
1651 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1652 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1653 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1654 		mask->enc_control.addr_type = ~0;
1655 		fl_set_key_val(tb, &key->enc_ipv4.src,
1656 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1657 			       &mask->enc_ipv4.src,
1658 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1659 			       sizeof(key->enc_ipv4.src));
1660 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1661 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1662 			       &mask->enc_ipv4.dst,
1663 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1664 			       sizeof(key->enc_ipv4.dst));
1665 	}
1666 
1667 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1668 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1669 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1670 		mask->enc_control.addr_type = ~0;
1671 		fl_set_key_val(tb, &key->enc_ipv6.src,
1672 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1673 			       &mask->enc_ipv6.src,
1674 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1675 			       sizeof(key->enc_ipv6.src));
1676 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1677 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1678 			       &mask->enc_ipv6.dst,
1679 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1680 			       sizeof(key->enc_ipv6.dst));
1681 	}
1682 
1683 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1684 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1685 		       sizeof(key->enc_key_id.keyid));
1686 
1687 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1688 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1689 		       sizeof(key->enc_tp.src));
1690 
1691 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1692 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1693 		       sizeof(key->enc_tp.dst));
1694 
1695 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1696 
1697 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1698 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1699 		       sizeof(key->hash.hash));
1700 
1701 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1702 		ret = fl_set_enc_opt(tb, key, mask, extack);
1703 		if (ret)
1704 			return ret;
1705 	}
1706 
1707 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1708 	if (ret)
1709 		return ret;
1710 
1711 	if (tb[TCA_FLOWER_KEY_FLAGS])
1712 		ret = fl_set_key_flags(tb, &key->control.flags,
1713 				       &mask->control.flags, extack);
1714 
1715 	return ret;
1716 }
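/* fl_set_key() translates netlink attributes into the fl_flow_key
 * layout. Illustrative example (standard tc(8) flower syntax) that
 * exercises the ethertype, ip_proto, address and port paths above:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_ip 192.0.2.1 dst_port 80 action drop
 */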
1717 
1718 static void fl_mask_copy(struct fl_flow_mask *dst,
1719 			 struct fl_flow_mask *src)
1720 {
1721 	const void *psrc = fl_key_get_start(&src->key, src);
1722 	void *pdst = fl_key_get_start(&dst->key, src);
1723 
1724 	memcpy(pdst, psrc, fl_mask_range(src));
1725 	dst->range = src->range;
1726 }
1727 
1728 static const struct rhashtable_params fl_ht_params = {
1729 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1730 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1731 	.automatic_shrinking = true,
1732 };
1733 
1734 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1735 {
1736 	mask->filter_ht_params = fl_ht_params;
1737 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1738 	mask->filter_ht_params.key_offset += mask->range.start;
1739 
1740 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1741 }
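/* The per-mask filter table hashes only the bytes the mask can set:
 * key_offset is advanced by range.start and key_len is the masked
 * span, so two filters that differ only outside the mask hash and
 * compare as equal. For example, with a mask covering only
 * fl_flow_key.basic, range.start/end bracket that member and the
 * rest of mkey is never inspected.
 */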
1742 
1743 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1744 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1745 
1746 #define FL_KEY_IS_MASKED(mask, member)						\
1747 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1748 		   0, FL_KEY_MEMBER_SIZE(member))				\
1749 
1750 #define FL_KEY_SET(keys, cnt, id, member)					\
1751 	do {									\
1752 		keys[cnt].key_id = id;						\
1753 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1754 		cnt++;								\
1755 	} while(0);
1756 
1757 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1758 	do {									\
1759 		if (FL_KEY_IS_MASKED(mask, member))				\
1760 			FL_KEY_SET(keys, cnt, id, member);			\
1761 	} while(0);
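/* For instance, FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 * FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4) expands to roughly:
 *
 *	if (memchr_inv((char *)mask + offsetof(struct fl_flow_key, ipv4),
 *		       0, sizeof_field(struct fl_flow_key, ipv4))) {
 *		keys[cnt].key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *		keys[cnt].offset = offsetof(struct fl_flow_key, ipv4);
 *		cnt++;
 *	}
 *
 * i.e. a dissector key is registered only when some bit of that
 * member is set in the mask.
 */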
1762 
1763 static void fl_init_dissector(struct flow_dissector *dissector,
1764 			      struct fl_flow_key *mask)
1765 {
1766 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1767 	size_t cnt = 0;
1768 
1769 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1770 			     FLOW_DISSECTOR_KEY_META, meta);
1771 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1772 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1773 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1774 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1775 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1776 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1777 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1778 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1779 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1780 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1781 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1782 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1783 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1784 			     FLOW_DISSECTOR_KEY_IP, ip);
1785 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1786 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1787 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1788 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1789 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1790 			     FLOW_DISSECTOR_KEY_ARP, arp);
1791 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1792 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1793 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1794 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1795 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1796 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1797 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1798 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1799 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1800 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1801 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1802 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1803 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1804 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1805 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1806 			   enc_control);
1807 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1808 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1809 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1810 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1811 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1812 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1813 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1814 			     FLOW_DISSECTOR_KEY_CT, ct);
1815 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1816 			     FLOW_DISSECTOR_KEY_HASH, hash);
1817 
1818 	skb_flow_dissector_init(dissector, keys, cnt);
1819 }
1820 
1821 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1822 					       struct fl_flow_mask *mask)
1823 {
1824 	struct fl_flow_mask *newmask;
1825 	int err;
1826 
1827 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1828 	if (!newmask)
1829 		return ERR_PTR(-ENOMEM);
1830 
1831 	fl_mask_copy(newmask, mask);
1832 
1833 	if ((newmask->key.tp_range.tp_min.dst &&
1834 	     newmask->key.tp_range.tp_max.dst) ||
1835 	    (newmask->key.tp_range.tp_min.src &&
1836 	     newmask->key.tp_range.tp_max.src))
1837 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1838 
1839 	err = fl_init_mask_hashtable(newmask);
1840 	if (err)
1841 		goto errout_free;
1842 
1843 	fl_init_dissector(&newmask->dissector, &newmask->key);
1844 
1845 	INIT_LIST_HEAD_RCU(&newmask->filters);
1846 
1847 	refcount_set(&newmask->refcnt, 1);
1848 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1849 				      &newmask->ht_node, mask_ht_params);
1850 	if (err)
1851 		goto errout_destroy;
1852 
1853 	spin_lock(&head->masks_lock);
1854 	list_add_tail_rcu(&newmask->list, &head->masks);
1855 	spin_unlock(&head->masks_lock);
1856 
1857 	return newmask;
1858 
1859 errout_destroy:
1860 	rhashtable_destroy(&newmask->ht);
1861 errout_free:
1862 	kfree(newmask);
1863 
1864 	return ERR_PTR(err);
1865 }
1866 
1867 static int fl_check_assign_mask(struct cls_fl_head *head,
1868 				struct cls_fl_filter *fnew,
1869 				struct cls_fl_filter *fold,
1870 				struct fl_flow_mask *mask)
1871 {
1872 	struct fl_flow_mask *newmask;
1873 	int ret = 0;
1874 
1875 	rcu_read_lock();
1876 
1877 	/* Insert mask as temporary node to prevent concurrent creation of mask
1878 	 * with same key. Any concurrent lookups with same key will return
1879 	 * -EAGAIN because mask's refcnt is zero.
1880 	 */
1881 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1882 						       &mask->ht_node,
1883 						       mask_ht_params);
1884 	if (!fnew->mask) {
1885 		rcu_read_unlock();
1886 
1887 		if (fold) {
1888 			ret = -EINVAL;
1889 			goto errout_cleanup;
1890 		}
1891 
1892 		newmask = fl_create_new_mask(head, mask);
1893 		if (IS_ERR(newmask)) {
1894 			ret = PTR_ERR(newmask);
1895 			goto errout_cleanup;
1896 		}
1897 
1898 		fnew->mask = newmask;
1899 		return 0;
1900 	} else if (IS_ERR(fnew->mask)) {
1901 		ret = PTR_ERR(fnew->mask);
1902 	} else if (fold && fold->mask != fnew->mask) {
1903 		ret = -EINVAL;
1904 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1905 		/* Mask was deleted concurrently, try again */
1906 		ret = -EAGAIN;
1907 	}
1908 	rcu_read_unlock();
1909 	return ret;
1910 
1911 errout_cleanup:
1912 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1913 			       mask_ht_params);
1914 	return ret;
1915 }
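/* Mask lifecycle sketch: the caller-supplied mask is only a template.
 * If an identical mask already exists it is shared and its refcount
 * taken; otherwise fl_create_new_mask() replaces the temporary node
 * with a permanent copy. Either way, fl_change() later frees its
 * template allocation via mask->rwork.
 */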
1916 
1917 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1918 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1919 			unsigned long base, struct nlattr **tb,
1920 			struct nlattr *est, bool ovr,
1921 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1922 			struct netlink_ext_ack *extack)
1923 {
1924 	int err;
1925 
1926 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1927 				extack);
1928 	if (err < 0)
1929 		return err;
1930 
1931 	if (tb[TCA_FLOWER_CLASSID]) {
1932 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1933 		if (!rtnl_held)
1934 			rtnl_lock();
1935 		tcf_bind_filter(tp, &f->res, base);
1936 		if (!rtnl_held)
1937 			rtnl_unlock();
1938 	}
1939 
1940 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1941 	if (err)
1942 		return err;
1943 
1944 	fl_mask_update_range(mask);
1945 	fl_set_masked_key(&f->mkey, &f->key, mask);
1946 
1947 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1948 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1949 		return -EINVAL;
1950 	}
1951 
1952 	return 0;
1953 }
1954 
1955 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1956 			       struct cls_fl_filter *fold,
1957 			       bool *in_ht)
1958 {
1959 	struct fl_flow_mask *mask = fnew->mask;
1960 	int err;
1961 
1962 	err = rhashtable_lookup_insert_fast(&mask->ht,
1963 					    &fnew->ht_node,
1964 					    mask->filter_ht_params);
1965 	if (err) {
1966 		*in_ht = false;
1967 		/* It is okay if filter with same key exists when
1968 		 * overwriting.
1969 		 */
1970 		return fold && err == -EEXIST ? 0 : err;
1971 	}
1972 
1973 	*in_ht = true;
1974 	return 0;
1975 }
1976 
1977 static int fl_change(struct net *net, struct sk_buff *in_skb,
1978 		     struct tcf_proto *tp, unsigned long base,
1979 		     u32 handle, struct nlattr **tca,
1980 		     void **arg, bool ovr, bool rtnl_held,
1981 		     struct netlink_ext_ack *extack)
1982 {
1983 	struct cls_fl_head *head = fl_head_dereference(tp);
1984 	struct cls_fl_filter *fold = *arg;
1985 	struct cls_fl_filter *fnew;
1986 	struct fl_flow_mask *mask;
1987 	struct nlattr **tb;
1988 	bool in_ht;
1989 	int err;
1990 
1991 	if (!tca[TCA_OPTIONS]) {
1992 		err = -EINVAL;
1993 		goto errout_fold;
1994 	}
1995 
1996 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1997 	if (!mask) {
1998 		err = -ENOBUFS;
1999 		goto errout_fold;
2000 	}
2001 
2002 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2003 	if (!tb) {
2004 		err = -ENOBUFS;
2005 		goto errout_mask_alloc;
2006 	}
2007 
2008 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2009 					  tca[TCA_OPTIONS], fl_policy, NULL);
2010 	if (err < 0)
2011 		goto errout_tb;
2012 
2013 	if (fold && handle && fold->handle != handle) {
2014 		err = -EINVAL;
2015 		goto errout_tb;
2016 	}
2017 
2018 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2019 	if (!fnew) {
2020 		err = -ENOBUFS;
2021 		goto errout_tb;
2022 	}
2023 	INIT_LIST_HEAD(&fnew->hw_list);
2024 	refcount_set(&fnew->refcnt, 1);
2025 
2026 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2027 	if (err < 0)
2028 		goto errout;
2029 
2030 	if (tb[TCA_FLOWER_FLAGS]) {
2031 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2032 
2033 		if (!tc_flags_valid(fnew->flags)) {
2034 			err = -EINVAL;
2035 			goto errout;
2036 		}
2037 	}
2038 
2039 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
2040 			   tp->chain->tmplt_priv, rtnl_held, extack);
2041 	if (err)
2042 		goto errout;
2043 
2044 	err = fl_check_assign_mask(head, fnew, fold, mask);
2045 	if (err)
2046 		goto errout;
2047 
2048 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2049 	if (err)
2050 		goto errout_mask;
2051 
2052 	if (!tc_skip_hw(fnew->flags)) {
2053 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2054 		if (err)
2055 			goto errout_ht;
2056 	}
2057 
2058 	if (!tc_in_hw(fnew->flags))
2059 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2060 
2061 	spin_lock(&tp->lock);
2062 
2063 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2064 	 * proto again or create new one, if necessary.
2065 	 */
2066 	if (tp->deleting) {
2067 		err = -EAGAIN;
2068 		goto errout_hw;
2069 	}
2070 
2071 	if (fold) {
2072 		/* Fold filter was deleted concurrently. Retry lookup. */
2073 		if (fold->deleted) {
2074 			err = -EAGAIN;
2075 			goto errout_hw;
2076 		}
2077 
2078 		fnew->handle = handle;
2079 
2080 		if (!in_ht) {
2081 			struct rhashtable_params params =
2082 				fnew->mask->filter_ht_params;
2083 
2084 			err = rhashtable_insert_fast(&fnew->mask->ht,
2085 						     &fnew->ht_node,
2086 						     params);
2087 			if (err)
2088 				goto errout_hw;
2089 			in_ht = true;
2090 		}
2091 
2092 		refcount_inc(&fnew->refcnt);
2093 		rhashtable_remove_fast(&fold->mask->ht,
2094 				       &fold->ht_node,
2095 				       fold->mask->filter_ht_params);
2096 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2097 		list_replace_rcu(&fold->list, &fnew->list);
2098 		fold->deleted = true;
2099 
2100 		spin_unlock(&tp->lock);
2101 
2102 		fl_mask_put(head, fold->mask);
2103 		if (!tc_skip_hw(fold->flags))
2104 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2105 		tcf_unbind_filter(tp, &fold->res);
2106 		/* Caller holds reference to fold, so refcnt is always > 0
2107 		 * after this.
2108 		 */
2109 		refcount_dec(&fold->refcnt);
2110 		__fl_put(fold);
2111 	} else {
2112 		if (handle) {
2113 			/* user specifies a handle and it doesn't exist */
2114 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2115 					    handle, GFP_ATOMIC);
2116 
2117 			/* Filter with specified handle was concurrently
2118 			 * inserted after initial check in cls_api. This is not
2119 			 * necessarily an error if NLM_F_EXCL is not set in
2120 			 * message flags. Returning EAGAIN will cause cls_api to
2121 			 * try to update concurrently inserted rule.
2122 			 */
2123 			if (err == -ENOSPC)
2124 				err = -EAGAIN;
2125 		} else {
2126 			handle = 1;
2127 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2128 					    INT_MAX, GFP_ATOMIC);
2129 		}
2130 		if (err)
2131 			goto errout_hw;
2132 
2133 		refcount_inc(&fnew->refcnt);
2134 		fnew->handle = handle;
2135 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2136 		spin_unlock(&tp->lock);
2137 	}
2138 
2139 	*arg = fnew;
2140 
2141 	kfree(tb);
2142 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2143 	return 0;
2144 
2145 errout_ht:
2146 	spin_lock(&tp->lock);
2147 errout_hw:
2148 	fnew->deleted = true;
2149 	spin_unlock(&tp->lock);
2150 	if (!tc_skip_hw(fnew->flags))
2151 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2152 	if (in_ht)
2153 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2154 				       fnew->mask->filter_ht_params);
2155 errout_mask:
2156 	fl_mask_put(head, fnew->mask);
2157 errout:
2158 	__fl_put(fnew);
2159 errout_tb:
2160 	kfree(tb);
2161 errout_mask_alloc:
2162 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2163 errout_fold:
2164 	if (fold)
2165 		__fl_put(fold);
2166 	return err;
2167 }
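/* fl_change() backs both filter creation and in-place replacement.
 * Illustrative example (standard tc(8) syntax; priority and handle
 * must identify an existing rule): re-running
 *
 *   tc filter replace dev eth0 ingress pref 1 handle 0x1 protocol ip \
 *       flower ip_proto udp dst_port 53 action pass
 *
 * takes the fold path above, swapping the old filter for the new one
 * under tp->lock.
 */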
2168 
2169 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2170 		     bool rtnl_held, struct netlink_ext_ack *extack)
2171 {
2172 	struct cls_fl_head *head = fl_head_dereference(tp);
2173 	struct cls_fl_filter *f = arg;
2174 	bool last_on_mask;
2175 	int err = 0;
2176 
2177 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2178 	*last = list_empty(&head->masks);
2179 	__fl_put(f);
2180 
2181 	return err;
2182 }
2183 
2184 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2185 		    bool rtnl_held)
2186 {
2187 	struct cls_fl_head *head = fl_head_dereference(tp);
2188 	unsigned long id = arg->cookie, tmp;
2189 	struct cls_fl_filter *f;
2190 
2191 	arg->count = arg->skip;
2192 
2193 	rcu_read_lock();
2194 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2195 		/* don't return filters that are being deleted */
2196 		if (!refcount_inc_not_zero(&f->refcnt))
2197 			continue;
2198 		rcu_read_unlock();
2199 
2200 		if (arg->fn(tp, f, arg) < 0) {
2201 			__fl_put(f);
2202 			arg->stop = 1;
2203 			rcu_read_lock();
2204 			break;
2205 		}
2206 		__fl_put(f);
2207 		arg->count++;
2208 		rcu_read_lock();
2209 	}
2210 	rcu_read_unlock();
2211 	arg->cookie = id;
2212 }
2213 
2214 static struct cls_fl_filter *
2215 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2216 {
2217 	struct cls_fl_head *head = fl_head_dereference(tp);
2218 
2219 	spin_lock(&tp->lock);
2220 	if (list_empty(&head->hw_filters)) {
2221 		spin_unlock(&tp->lock);
2222 		return NULL;
2223 	}
2224 
2225 	if (!f)
2226 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2227 			       hw_list);
2228 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2229 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2230 			spin_unlock(&tp->lock);
2231 			return f;
2232 		}
2233 	}
2234 
2235 	spin_unlock(&tp->lock);
2236 	return NULL;
2237 }
2238 
2239 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2240 			void *cb_priv, struct netlink_ext_ack *extack)
2241 {
2242 	struct tcf_block *block = tp->chain->block;
2243 	struct flow_cls_offload cls_flower = {};
2244 	struct cls_fl_filter *f = NULL;
2245 	int err;
2246 
2247 	/* hw_filters list can only be changed by hw offload functions after
2248 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2249 	 * iterating it.
2250 	 */
2251 	ASSERT_RTNL();
2252 
2253 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2254 		cls_flower.rule =
2255 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2256 		if (!cls_flower.rule) {
2257 			__fl_put(f);
2258 			return -ENOMEM;
2259 		}
2260 
2261 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2262 					   extack);
2263 		cls_flower.command = add ?
2264 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2265 		cls_flower.cookie = (unsigned long)f;
2266 		cls_flower.rule->match.dissector = &f->mask->dissector;
2267 		cls_flower.rule->match.mask = &f->mask->key;
2268 		cls_flower.rule->match.key = &f->mkey;
2269 
2270 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2271 		if (err) {
2272 			kfree(cls_flower.rule);
2273 			if (tc_skip_sw(f->flags)) {
2274 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2275 				__fl_put(f);
2276 				return err;
2277 			}
2278 			goto next_flow;
2279 		}
2280 
2281 		cls_flower.classid = f->res.classid;
2282 
2283 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2284 					    TC_SETUP_CLSFLOWER, &cls_flower,
2285 					    cb_priv, &f->flags,
2286 					    &f->in_hw_count);
2287 		tc_cleanup_flow_action(&cls_flower.rule->action);
2288 		kfree(cls_flower.rule);
2289 
2290 		if (err) {
2291 			__fl_put(f);
2292 			return err;
2293 		}
2294 next_flow:
2295 		__fl_put(f);
2296 	}
2297 
2298 	return 0;
2299 }
2300 
2301 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2302 {
2303 	struct flow_cls_offload *cls_flower = type_data;
2304 	struct cls_fl_filter *f =
2305 		(struct cls_fl_filter *) cls_flower->cookie;
2306 	struct cls_fl_head *head = fl_head_dereference(tp);
2307 
2308 	spin_lock(&tp->lock);
2309 	list_add(&f->hw_list, &head->hw_filters);
2310 	spin_unlock(&tp->lock);
2311 }
2312 
2313 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2314 {
2315 	struct flow_cls_offload *cls_flower = type_data;
2316 	struct cls_fl_filter *f =
2317 		(struct cls_fl_filter *) cls_flower->cookie;
2318 
2319 	spin_lock(&tp->lock);
2320 	if (!list_empty(&f->hw_list))
2321 		list_del_init(&f->hw_list);
2322 	spin_unlock(&tp->lock);
2323 }
2324 
2325 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2326 			      struct fl_flow_tmplt *tmplt)
2327 {
2328 	struct flow_cls_offload cls_flower = {};
2329 	struct tcf_block *block = chain->block;
2330 
2331 	cls_flower.rule = flow_rule_alloc(0);
2332 	if (!cls_flower.rule)
2333 		return -ENOMEM;
2334 
2335 	cls_flower.common.chain_index = chain->index;
2336 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2337 	cls_flower.cookie = (unsigned long) tmplt;
2338 	cls_flower.rule->match.dissector = &tmplt->dissector;
2339 	cls_flower.rule->match.mask = &tmplt->mask;
2340 	cls_flower.rule->match.key = &tmplt->dummy_key;
2341 
2342 	/* We don't care if any driver fails to handle this
2343 	 * call. It serves only as a hint to them.
2344 	 */
2345 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2346 	kfree(cls_flower.rule);
2347 
2348 	return 0;
2349 }
2350 
2351 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2352 				struct fl_flow_tmplt *tmplt)
2353 {
2354 	struct flow_cls_offload cls_flower = {};
2355 	struct tcf_block *block = chain->block;
2356 
2357 	cls_flower.common.chain_index = chain->index;
2358 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2359 	cls_flower.cookie = (unsigned long) tmplt;
2360 
2361 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2362 }
2363 
2364 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2365 			     struct nlattr **tca,
2366 			     struct netlink_ext_ack *extack)
2367 {
2368 	struct fl_flow_tmplt *tmplt;
2369 	struct nlattr **tb;
2370 	int err;
2371 
2372 	if (!tca[TCA_OPTIONS])
2373 		return ERR_PTR(-EINVAL);
2374 
2375 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2376 	if (!tb)
2377 		return ERR_PTR(-ENOBUFS);
2378 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2379 					  tca[TCA_OPTIONS], fl_policy, NULL);
2380 	if (err)
2381 		goto errout_tb;
2382 
2383 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2384 	if (!tmplt) {
2385 		err = -ENOMEM;
2386 		goto errout_tb;
2387 	}
2388 	tmplt->chain = chain;
2389 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2390 	if (err)
2391 		goto errout_tmplt;
2392 
2393 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2394 
2395 	err = fl_hw_create_tmplt(chain, tmplt);
2396 	if (err)
2397 		goto errout_tmplt;
2398 
2399 	kfree(tb);
2400 	return tmplt;
2401 
2402 errout_tmplt:
2403 	kfree(tmplt);
2404 errout_tb:
2405 	kfree(tb);
2406 	return ERR_PTR(err);
2407 }
2408 
2409 static void fl_tmplt_destroy(void *tmplt_priv)
2410 {
2411 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2412 
2413 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2414 	kfree(tmplt);
2415 }
2416 
2417 static int fl_dump_key_val(struct sk_buff *skb,
2418 			   void *val, int val_type,
2419 			   void *mask, int mask_type, int len)
2420 {
2421 	int err;
2422 
2423 	if (!memchr_inv(mask, 0, len))
2424 		return 0;
2425 	err = nla_put(skb, val_type, len, val);
2426 	if (err)
2427 		return err;
2428 	if (mask_type != TCA_FLOWER_UNSPEC) {
2429 		err = nla_put(skb, mask_type, len, mask);
2430 		if (err)
2431 			return err;
2432 	}
2433 	return 0;
2434 }
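/* Note the early return above: attributes whose mask is all-zero are
 * elided from the dump, so "tc filter show" only reports fields the
 * filter actually matches on. When a mask is set and mask_type is not
 * TCA_FLOWER_UNSPEC, the value and its *_MASK companion are emitted
 * as a pair.
 */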
2435 
2436 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2437 				  struct fl_flow_key *mask)
2438 {
2439 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2440 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2441 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2442 			    sizeof(key->tp_range.tp_min.dst)) ||
2443 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2444 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2445 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2446 			    sizeof(key->tp_range.tp_max.dst)) ||
2447 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2448 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2449 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2450 			    sizeof(key->tp_range.tp_min.src)) ||
2451 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2452 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2453 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2454 			    sizeof(key->tp_range.tp_max.src)))
2455 		return -1;
2456 
2457 	return 0;
2458 }
2459 
2460 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2461 				    struct flow_dissector_key_mpls *mpls_key,
2462 				    struct flow_dissector_key_mpls *mpls_mask,
2463 				    u8 lse_index)
2464 {
2465 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2466 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2467 	int err;
2468 
2469 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2470 			 lse_index + 1);
2471 	if (err)
2472 		return err;
2473 
2474 	if (lse_mask->mpls_ttl) {
2475 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2476 				 lse_key->mpls_ttl);
2477 		if (err)
2478 			return err;
2479 	}
2480 	if (lse_mask->mpls_bos) {
2481 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2482 				 lse_key->mpls_bos);
2483 		if (err)
2484 			return err;
2485 	}
2486 	if (lse_mask->mpls_tc) {
2487 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2488 				 lse_key->mpls_tc);
2489 		if (err)
2490 			return err;
2491 	}
2492 	if (lse_mask->mpls_label) {
2493 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2494 				  lse_key->mpls_label);
2495 		if (err)
2496 			return err;
2497 	}
2498 
2499 	return 0;
2500 }
2501 
2502 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2503 				 struct flow_dissector_key_mpls *mpls_key,
2504 				 struct flow_dissector_key_mpls *mpls_mask)
2505 {
2506 	struct nlattr *opts;
2507 	struct nlattr *lse;
2508 	u8 lse_index;
2509 	int err;
2510 
2511 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2512 	if (!opts)
2513 		return -EMSGSIZE;
2514 
2515 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2516 		if (!(mpls_mask->used_lses & 1 << lse_index))
2517 			continue;
2518 
2519 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2520 		if (!lse) {
2521 			err = -EMSGSIZE;
2522 			goto err_opts;
2523 		}
2524 
2525 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2526 					       lse_index);
2527 		if (err)
2528 			goto err_opts_lse;
2529 		nla_nest_end(skb, lse);
2530 	}
2531 	nla_nest_end(skb, opts);
2532 
2533 	return 0;
2534 
2535 err_opts_lse:
2536 	nla_nest_cancel(skb, lse);
2537 err_opts:
2538 	nla_nest_cancel(skb, opts);
2539 
2540 	return err;
2541 }
2542 
2543 static int fl_dump_key_mpls(struct sk_buff *skb,
2544 			    struct flow_dissector_key_mpls *mpls_key,
2545 			    struct flow_dissector_key_mpls *mpls_mask)
2546 {
2547 	struct flow_dissector_mpls_lse *lse_mask;
2548 	struct flow_dissector_mpls_lse *lse_key;
2549 	int err;
2550 
2551 	if (!mpls_mask->used_lses)
2552 		return 0;
2553 
2554 	lse_mask = &mpls_mask->ls[0];
2555 	lse_key = &mpls_key->ls[0];
2556 
2557 	/* For backward compatibility, don't use the MPLS nested attributes if
2558 	 * the rule can be expressed using the old attributes.
2559 	 */
2560 	if (mpls_mask->used_lses & ~1 ||
2561 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2562 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2563 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2564 
2565 	if (lse_mask->mpls_ttl) {
2566 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2567 				 lse_key->mpls_ttl);
2568 		if (err)
2569 			return err;
2570 	}
2571 	if (lse_mask->mpls_tc) {
2572 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2573 				 lse_key->mpls_tc);
2574 		if (err)
2575 			return err;
2576 	}
2577 	if (lse_mask->mpls_label) {
2578 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2579 				  lse_key->mpls_label);
2580 		if (err)
2581 			return err;
2582 	}
2583 	if (lse_mask->mpls_bos) {
2584 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2585 				 lse_key->mpls_bos);
2586 		if (err)
2587 			return err;
2588 	}
2589 	return 0;
2590 }
2591 
2592 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2593 			  struct flow_dissector_key_ip *key,
2594 			  struct flow_dissector_key_ip *mask)
2595 {
2596 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2597 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2598 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2599 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2600 
2601 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2602 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2603 		return -1;
2604 
2605 	return 0;
2606 }
2607 
2608 static int fl_dump_key_vlan(struct sk_buff *skb,
2609 			    int vlan_id_key, int vlan_prio_key,
2610 			    struct flow_dissector_key_vlan *vlan_key,
2611 			    struct flow_dissector_key_vlan *vlan_mask)
2612 {
2613 	int err;
2614 
2615 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2616 		return 0;
2617 	if (vlan_mask->vlan_id) {
2618 		err = nla_put_u16(skb, vlan_id_key,
2619 				  vlan_key->vlan_id);
2620 		if (err)
2621 			return err;
2622 	}
2623 	if (vlan_mask->vlan_priority) {
2624 		err = nla_put_u8(skb, vlan_prio_key,
2625 				 vlan_key->vlan_priority);
2626 		if (err)
2627 			return err;
2628 	}
2629 	return 0;
2630 }
2631 
2632 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2633 			    u32 *flower_key, u32 *flower_mask,
2634 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2635 {
2636 	if (dissector_mask & dissector_flag_bit) {
2637 		*flower_mask |= flower_flag_bit;
2638 		if (dissector_key & dissector_flag_bit)
2639 			*flower_key |= flower_flag_bit;
2640 	}
2641 }
2642 
2643 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2644 {
2645 	u32 key, mask;
2646 	__be32 _key, _mask;
2647 	int err;
2648 
2649 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2650 		return 0;
2651 
2652 	key = 0;
2653 	mask = 0;
2654 
2655 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2656 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2657 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2658 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2659 			FLOW_DIS_FIRST_FRAG);
2660 
2661 	_key = cpu_to_be32(key);
2662 	_mask = cpu_to_be32(mask);
2663 
2664 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2665 	if (err)
2666 		return err;
2667 
2668 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2669 }
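/* The flags key is dumped as a pair of big-endian u32 attributes.
 * Illustrative example (standard tc(8) flower syntax): a rule created
 * with "ip_flags frag" sets FLOW_DIS_IS_FRAGMENT and round-trips
 * through the translation above.
 */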
2670 
2671 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2672 				  struct flow_dissector_key_enc_opts *enc_opts)
2673 {
2674 	struct geneve_opt *opt;
2675 	struct nlattr *nest;
2676 	int opt_off = 0;
2677 
2678 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2679 	if (!nest)
2680 		goto nla_put_failure;
2681 
2682 	while (enc_opts->len > opt_off) {
2683 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2684 
2685 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2686 				 opt->opt_class))
2687 			goto nla_put_failure;
2688 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2689 			       opt->type))
2690 			goto nla_put_failure;
2691 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2692 			    opt->length * 4, opt->opt_data))
2693 			goto nla_put_failure;
2694 
2695 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2696 	}
2697 	nla_nest_end(skb, nest);
2698 	return 0;
2699 
2700 nla_put_failure:
2701 	nla_nest_cancel(skb, nest);
2702 	return -EMSGSIZE;
2703 }
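/* Geneve options are dumped as class/type/data triplets, mirroring
 * how they are configured. Illustrative example (tc(8) flower syntax,
 * values hypothetical):
 *
 *   tc filter add dev geneve0 ingress flower \
 *       enc_key_id 42 geneve_opts 0102:80:00880022 action pass
 */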
2704 
2705 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2706 				 struct flow_dissector_key_enc_opts *enc_opts)
2707 {
2708 	struct vxlan_metadata *md;
2709 	struct nlattr *nest;
2710 
2711 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2712 	if (!nest)
2713 		goto nla_put_failure;
2714 
2715 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2716 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2717 		goto nla_put_failure;
2718 
2719 	nla_nest_end(skb, nest);
2720 	return 0;
2721 
2722 nla_put_failure:
2723 	nla_nest_cancel(skb, nest);
2724 	return -EMSGSIZE;
2725 }
2726 
2727 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2728 				  struct flow_dissector_key_enc_opts *enc_opts)
2729 {
2730 	struct erspan_metadata *md;
2731 	struct nlattr *nest;
2732 
2733 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2734 	if (!nest)
2735 		goto nla_put_failure;
2736 
2737 	md = (struct erspan_metadata *)&enc_opts->data[0];
2738 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2739 		goto nla_put_failure;
2740 
2741 	if (md->version == 1 &&
2742 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2743 		goto nla_put_failure;
2744 
2745 	if (md->version == 2 &&
2746 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2747 			md->u.md2.dir) ||
2748 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2749 			get_hwid(&md->u.md2))))
2750 		goto nla_put_failure;
2751 
2752 	nla_nest_end(skb, nest);
2753 	return 0;
2754 
2755 nla_put_failure:
2756 	nla_nest_cancel(skb, nest);
2757 	return -EMSGSIZE;
2758 }
2759 
2760 static int fl_dump_key_ct(struct sk_buff *skb,
2761 			  struct flow_dissector_key_ct *key,
2762 			  struct flow_dissector_key_ct *mask)
2763 {
2764 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2765 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2766 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2767 			    sizeof(key->ct_state)))
2768 		goto nla_put_failure;
2769 
2770 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2771 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2772 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2773 			    sizeof(key->ct_zone)))
2774 		goto nla_put_failure;
2775 
2776 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2777 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2778 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2779 			    sizeof(key->ct_mark)))
2780 		goto nla_put_failure;
2781 
2782 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2783 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2784 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2785 			    sizeof(key->ct_labels)))
2786 		goto nla_put_failure;
2787 
2788 	return 0;
2789 
2790 nla_put_failure:
2791 	return -EMSGSIZE;
2792 }
2793 
2794 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2795 			       struct flow_dissector_key_enc_opts *enc_opts)
2796 {
2797 	struct nlattr *nest;
2798 	int err;
2799 
2800 	if (!enc_opts->len)
2801 		return 0;
2802 
2803 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2804 	if (!nest)
2805 		goto nla_put_failure;
2806 
2807 	switch (enc_opts->dst_opt_type) {
2808 	case TUNNEL_GENEVE_OPT:
2809 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2810 		if (err)
2811 			goto nla_put_failure;
2812 		break;
2813 	case TUNNEL_VXLAN_OPT:
2814 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2815 		if (err)
2816 			goto nla_put_failure;
2817 		break;
2818 	case TUNNEL_ERSPAN_OPT:
2819 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2820 		if (err)
2821 			goto nla_put_failure;
2822 		break;
2823 	default:
2824 		goto nla_put_failure;
2825 	}
2826 	nla_nest_end(skb, nest);
2827 	return 0;
2828 
2829 nla_put_failure:
2830 	nla_nest_cancel(skb, nest);
2831 	return -EMSGSIZE;
2832 }
2833 
2834 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2835 			       struct flow_dissector_key_enc_opts *key_opts,
2836 			       struct flow_dissector_key_enc_opts *msk_opts)
2837 {
2838 	int err;
2839 
2840 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2841 	if (err)
2842 		return err;
2843 
2844 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2845 }
2846 
2847 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2848 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2849 {
2850 	if (mask->meta.ingress_ifindex) {
2851 		struct net_device *dev;
2852 
2853 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2854 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2855 			goto nla_put_failure;
2856 	}
2857 
2858 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2859 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2860 			    sizeof(key->eth.dst)) ||
2861 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2862 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2863 			    sizeof(key->eth.src)) ||
2864 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2865 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2866 			    sizeof(key->basic.n_proto)))
2867 		goto nla_put_failure;
2868 
2869 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2870 		goto nla_put_failure;
2871 
2872 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2873 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2874 		goto nla_put_failure;
2875 
2876 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2877 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2878 			     &key->cvlan, &mask->cvlan) ||
2879 	    (mask->cvlan.vlan_tpid &&
2880 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2881 			  key->cvlan.vlan_tpid)))
2882 		goto nla_put_failure;
2883 
2884 	if (mask->basic.n_proto) {
2885 		if (mask->cvlan.vlan_eth_type) {
2886 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2887 					 key->basic.n_proto))
2888 				goto nla_put_failure;
2889 		} else if (mask->vlan.vlan_eth_type) {
2890 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2891 					 key->vlan.vlan_eth_type))
2892 				goto nla_put_failure;
2893 		}
2894 	}
2895 
2896 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2897 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2898 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2899 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2900 			    sizeof(key->basic.ip_proto)) ||
2901 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2902 		goto nla_put_failure;
2903 
2904 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2905 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2906 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2907 			     sizeof(key->ipv4.src)) ||
2908 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2909 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2910 			     sizeof(key->ipv4.dst))))
2911 		goto nla_put_failure;
2912 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2913 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2914 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2915 				  sizeof(key->ipv6.src)) ||
2916 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2917 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2918 				  sizeof(key->ipv6.dst))))
2919 		goto nla_put_failure;
2920 
2921 	if (key->basic.ip_proto == IPPROTO_TCP &&
2922 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2923 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2924 			     sizeof(key->tp.src)) ||
2925 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2926 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2927 			     sizeof(key->tp.dst)) ||
2928 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2929 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2930 			     sizeof(key->tcp.flags))))
2931 		goto nla_put_failure;
2932 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2933 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2934 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2935 				  sizeof(key->tp.src)) ||
2936 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2937 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2938 				  sizeof(key->tp.dst))))
2939 		goto nla_put_failure;
2940 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2941 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2942 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2943 				  sizeof(key->tp.src)) ||
2944 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2945 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2946 				  sizeof(key->tp.dst))))
2947 		goto nla_put_failure;
2948 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2949 		 key->basic.ip_proto == IPPROTO_ICMP &&
2950 		 (fl_dump_key_val(skb, &key->icmp.type,
2951 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2952 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2953 				  sizeof(key->icmp.type)) ||
2954 		  fl_dump_key_val(skb, &key->icmp.code,
2955 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2956 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2957 				  sizeof(key->icmp.code))))
2958 		goto nla_put_failure;
2959 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2960 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2961 		 (fl_dump_key_val(skb, &key->icmp.type,
2962 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2963 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2964 				  sizeof(key->icmp.type)) ||
2965 		  fl_dump_key_val(skb, &key->icmp.code,
2966 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2967 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2968 				  sizeof(key->icmp.code))))
2969 		goto nla_put_failure;
2970 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2971 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2972 		 (fl_dump_key_val(skb, &key->arp.sip,
2973 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2974 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2975 				  sizeof(key->arp.sip)) ||
2976 		  fl_dump_key_val(skb, &key->arp.tip,
2977 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2978 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2979 				  sizeof(key->arp.tip)) ||
2980 		  fl_dump_key_val(skb, &key->arp.op,
2981 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2982 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2983 				  sizeof(key->arp.op)) ||
2984 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2985 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2986 				  sizeof(key->arp.sha)) ||
2987 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2988 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2989 				  sizeof(key->arp.tha))))
2990 		goto nla_put_failure;
2991 
2992 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2993 	     key->basic.ip_proto == IPPROTO_UDP ||
2994 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2995 	     fl_dump_key_port_range(skb, key, mask))
2996 		goto nla_put_failure;
2997 
2998 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2999 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3000 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3001 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3002 			    sizeof(key->enc_ipv4.src)) ||
3003 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3004 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3005 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3006 			     sizeof(key->enc_ipv4.dst))))
3007 		goto nla_put_failure;
3008 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3009 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3010 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3011 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3012 			    sizeof(key->enc_ipv6.src)) ||
3013 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3014 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3015 				 &mask->enc_ipv6.dst,
3016 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3017 			    sizeof(key->enc_ipv6.dst))))
3018 		goto nla_put_failure;
3019 
3020 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3021 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3022 			    sizeof(key->enc_key_id)) ||
3023 	    fl_dump_key_val(skb, &key->enc_tp.src,
3024 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3025 			    &mask->enc_tp.src,
3026 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3027 			    sizeof(key->enc_tp.src)) ||
3028 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3029 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3030 			    &mask->enc_tp.dst,
3031 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3032 			    sizeof(key->enc_tp.dst)) ||
3033 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3034 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3035 		goto nla_put_failure;
3036 
3037 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3038 		goto nla_put_failure;
3039 
3040 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3041 		goto nla_put_failure;
3042 
3043 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3044 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3045 			     sizeof(key->hash.hash)))
3046 		goto nla_put_failure;
3047 
3048 	return 0;
3049 
3050 nla_put_failure:
3051 	return -EMSGSIZE;
3052 }
3053 
3054 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3055 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3056 {
3057 	struct cls_fl_filter *f = fh;
3058 	struct nlattr *nest;
3059 	struct fl_flow_key *key, *mask;
3060 	bool skip_hw;
3061 
3062 	if (!f)
3063 		return skb->len;
3064 
3065 	t->tcm_handle = f->handle;
3066 
3067 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3068 	if (!nest)
3069 		goto nla_put_failure;
3070 
3071 	spin_lock(&tp->lock);
3072 
3073 	if (f->res.classid &&
3074 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3075 		goto nla_put_failure_locked;
3076 
3077 	key = &f->key;
3078 	mask = &f->mask->key;
3079 	skip_hw = tc_skip_hw(f->flags);
3080 
3081 	if (fl_dump_key(skb, net, key, mask))
3082 		goto nla_put_failure_locked;
3083 
3084 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3085 		goto nla_put_failure_locked;
3086 
3087 	spin_unlock(&tp->lock);
3088 
3089 	if (!skip_hw)
3090 		fl_hw_update_stats(tp, f, rtnl_held);
3091 
3092 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3093 		goto nla_put_failure;
3094 
3095 	if (tcf_exts_dump(skb, &f->exts))
3096 		goto nla_put_failure;
3097 
3098 	nla_nest_end(skb, nest);
3099 
3100 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3101 		goto nla_put_failure;
3102 
3103 	return skb->len;
3104 
3105 nla_put_failure_locked:
3106 	spin_unlock(&tp->lock);
3107 nla_put_failure:
3108 	nla_nest_cancel(skb, nest);
3109 	return -1;
3110 }
3111 
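/*
 * fl_terse_dump() - abbreviated dump used for terse filter listings: only
 * the handle, flags and a terse view of the actions are emitted; the match
 * key/mask is skipped entirely.
 */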
3112 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3113 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3114 {
3115 	struct cls_fl_filter *f = fh;
3116 	struct nlattr *nest;
3117 	bool skip_hw;
3118 
3119 	if (!f)
3120 		return skb->len;
3121 
3122 	t->tcm_handle = f->handle;
3123 
3124 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3125 	if (!nest)
3126 		goto nla_put_failure;
3127 
3128 	spin_lock(&tp->lock);
3129 
3130 	skip_hw = tc_skip_hw(f->flags);
3131 
3132 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3133 		goto nla_put_failure_locked;
3134 
3135 	spin_unlock(&tp->lock);
3136 
3137 	if (!skip_hw)
3138 		fl_hw_update_stats(tp, f, rtnl_held);
3139 
3140 	if (tcf_exts_terse_dump(skb, &f->exts))
3141 		goto nla_put_failure;
3142 
3143 	nla_nest_end(skb, nest);
3144 
3145 	return skb->len;
3146 
3147 nla_put_failure_locked:
3148 	spin_unlock(&tp->lock);
3149 nla_put_failure:
3150 	nla_nest_cancel(skb, nest);
3151 	return -1;
3152 }
3153 
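/*
 * fl_tmplt_dump() - dump a chain template's dummy key and mask, reusing the
 * same fl_dump_key() helper as regular filter dumps.
 */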
3154 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3155 {
3156 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3157 	struct fl_flow_key *key, *mask;
3158 	struct nlattr *nest;
3159 
3160 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3161 	if (!nest)
3162 		goto nla_put_failure;
3163 
3164 	key = &tmplt->dummy_key;
3165 	mask = &tmplt->mask;
3166 
3167 	if (fl_dump_key(skb, net, key, mask))
3168 		goto nla_put_failure;
3169 
3170 	nla_nest_end(skb, nest);
3171 
3172 	return skb->len;
3173 
3174 nla_put_failure:
3175 	nla_nest_cancel(skb, nest);
3176 	return -EMSGSIZE;
3177 }
3178 
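/*
 * fl_bind_class() - if the filter's result points at @classid, bind it to
 * the new class (@cl != 0) or unbind it from the owning qdisc.
 */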
3179 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3180 			  unsigned long base)
3181 {
3182 	struct cls_fl_filter *f = fh;
3183 
3184 	if (f && f->res.classid == classid) {
3185 		if (cl)
3186 			__tcf_bind_filter(q, &f->res, base);
3187 		else
3188 			__tcf_unbind_filter(q, &f->res);
3189 	}
3190 }
3191 
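/*
 * fl_delete_empty() - report whether this classifier instance has no
 * filters left.  The result is recorded in tp->deleting under tp->lock and
 * returned to the caller, which may then tear down the empty instance.
 */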
3192 static bool fl_delete_empty(struct tcf_proto *tp)
3193 {
3194 	struct cls_fl_head *head = fl_head_dereference(tp);
3195 
3196 	spin_lock(&tp->lock);
3197 	tp->deleting = idr_is_empty(&head->handle_idr);
3198 	spin_unlock(&tp->lock);
3199 
3200 	return tp->deleting;
3201 }
3202 
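/*
 * Classifier operations exported to the TC core.  TCF_PROTO_OPS_DOIT_UNLOCKED
 * advertises that these callbacks may be invoked without the RTNL lock held
 * (rtnl_held may be false).
 *
 * From userspace, a flower filter is typically installed with something
 * roughly like:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           dst_ip 198.51.100.1 action drop
 */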
3203 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3204 	.kind		= "flower",
3205 	.classify	= fl_classify,
3206 	.init		= fl_init,
3207 	.destroy	= fl_destroy,
3208 	.get		= fl_get,
3209 	.put		= fl_put,
3210 	.change		= fl_change,
3211 	.delete		= fl_delete,
3212 	.delete_empty	= fl_delete_empty,
3213 	.walk		= fl_walk,
3214 	.reoffload	= fl_reoffload,
3215 	.hw_add		= fl_hw_add,
3216 	.hw_del		= fl_hw_del,
3217 	.dump		= fl_dump,
3218 	.terse_dump	= fl_terse_dump,
3219 	.bind_class	= fl_bind_class,
3220 	.tmplt_create	= fl_tmplt_create,
3221 	.tmplt_destroy	= fl_tmplt_destroy,
3222 	.tmplt_dump	= fl_tmplt_dump,
3223 	.owner		= THIS_MODULE,
3224 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3225 };
3226 
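/*
 * Module entry points: register the "flower" classifier ops with the TC
 * core on load and unregister them on unload.
 */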
3227 static int __init cls_fl_init(void)
3228 {
3229 	return register_tcf_proto_ops(&cls_fl_ops);
3230 }
3231 
3232 static void __exit cls_fl_exit(void)
3233 {
3234 	unregister_tcf_proto_ops(&cls_fl_ops);
3235 }
3236 
3237 module_init(cls_fl_init);
3238 module_exit(cls_fl_exit);
3239 
3240 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3241 MODULE_DESCRIPTION("Flower classifier");
3242 MODULE_LICENSE("GPL v2");
3243