• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/pkt_sched.h>
23 #include <net/ip.h>
24 #include <net/flow_dissector.h>
25 #include <net/geneve.h>
26 #include <net/vxlan.h>
27 #include <net/erspan.h>
28 
29 #include <net/dst.h>
30 #include <net/dst_metadata.h>
31 
32 #include <uapi/linux/netfilter/nf_conntrack_common.h>
33 
34 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
35 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
36 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
37 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
38 
39 struct fl_flow_key {
40 	struct flow_dissector_key_meta meta;
41 	struct flow_dissector_key_control control;
42 	struct flow_dissector_key_control enc_control;
43 	struct flow_dissector_key_basic basic;
44 	struct flow_dissector_key_eth_addrs eth;
45 	struct flow_dissector_key_vlan vlan;
46 	struct flow_dissector_key_vlan cvlan;
47 	union {
48 		struct flow_dissector_key_ipv4_addrs ipv4;
49 		struct flow_dissector_key_ipv6_addrs ipv6;
50 	};
51 	struct flow_dissector_key_ports tp;
52 	struct flow_dissector_key_icmp icmp;
53 	struct flow_dissector_key_arp arp;
54 	struct flow_dissector_key_keyid enc_key_id;
55 	union {
56 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
57 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
58 	};
59 	struct flow_dissector_key_ports enc_tp;
60 	struct flow_dissector_key_mpls mpls;
61 	struct flow_dissector_key_tcp tcp;
62 	struct flow_dissector_key_ip ip;
63 	struct flow_dissector_key_ip enc_ip;
64 	struct flow_dissector_key_enc_opts enc_opts;
65 	union {
66 		struct flow_dissector_key_ports tp;
67 		struct {
68 			struct flow_dissector_key_ports tp_min;
69 			struct flow_dissector_key_ports tp_max;
70 		};
71 	} tp_range;
72 	struct flow_dissector_key_ct ct;
73 	struct flow_dissector_key_hash hash;
74 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
75 
76 struct fl_flow_mask_range {
77 	unsigned short int start;
78 	unsigned short int end;
79 };
80 
81 struct fl_flow_mask {
82 	struct fl_flow_key key;
83 	struct fl_flow_mask_range range;
84 	u32 flags;
85 	struct rhash_head ht_node;
86 	struct rhashtable ht;
87 	struct rhashtable_params filter_ht_params;
88 	struct flow_dissector dissector;
89 	struct list_head filters;
90 	struct rcu_work rwork;
91 	struct list_head list;
92 	refcount_t refcnt;
93 };
94 
95 struct fl_flow_tmplt {
96 	struct fl_flow_key dummy_key;
97 	struct fl_flow_key mask;
98 	struct flow_dissector dissector;
99 	struct tcf_chain *chain;
100 };
101 
102 struct cls_fl_head {
103 	struct rhashtable ht;
104 	spinlock_t masks_lock; /* Protect masks list */
105 	struct list_head masks;
106 	struct list_head hw_filters;
107 	struct rcu_work rwork;
108 	struct idr handle_idr;
109 };
110 
111 struct cls_fl_filter {
112 	struct fl_flow_mask *mask;
113 	struct rhash_head ht_node;
114 	struct fl_flow_key mkey;
115 	struct tcf_exts exts;
116 	struct tcf_result res;
117 	struct fl_flow_key key;
118 	struct list_head list;
119 	struct list_head hw_list;
120 	u32 handle;
121 	u32 flags;
122 	u32 in_hw_count;
123 	struct rcu_work rwork;
124 	struct net_device *hw_dev;
125 	/* Flower classifier is unlocked, which means that its reference counter
126 	 * can be changed concurrently without any kind of external
127 	 * synchronization. Use atomic reference counter to be concurrency-safe.
128 	 */
129 	refcount_t refcnt;
130 	bool deleted;
131 };
132 
133 static const struct rhashtable_params mask_ht_params = {
134 	.key_offset = offsetof(struct fl_flow_mask, key),
135 	.key_len = sizeof(struct fl_flow_key),
136 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
137 	.automatic_shrinking = true,
138 };
139 
fl_mask_range(const struct fl_flow_mask * mask)140 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
141 {
142 	return mask->range.end - mask->range.start;
143 }
144 
fl_mask_update_range(struct fl_flow_mask * mask)145 static void fl_mask_update_range(struct fl_flow_mask *mask)
146 {
147 	const u8 *bytes = (const u8 *) &mask->key;
148 	size_t size = sizeof(mask->key);
149 	size_t i, first = 0, last;
150 
151 	for (i = 0; i < size; i++) {
152 		if (bytes[i]) {
153 			first = i;
154 			break;
155 		}
156 	}
157 	last = first;
158 	for (i = size - 1; i != first; i--) {
159 		if (bytes[i]) {
160 			last = i;
161 			break;
162 		}
163 	}
164 	mask->range.start = rounddown(first, sizeof(long));
165 	mask->range.end = roundup(last + 1, sizeof(long));
166 }
167 
fl_key_get_start(struct fl_flow_key * key,const struct fl_flow_mask * mask)168 static void *fl_key_get_start(struct fl_flow_key *key,
169 			      const struct fl_flow_mask *mask)
170 {
171 	return (u8 *) key + mask->range.start;
172 }
173 
fl_set_masked_key(struct fl_flow_key * mkey,struct fl_flow_key * key,struct fl_flow_mask * mask)174 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
175 			      struct fl_flow_mask *mask)
176 {
177 	const long *lkey = fl_key_get_start(key, mask);
178 	const long *lmask = fl_key_get_start(&mask->key, mask);
179 	long *lmkey = fl_key_get_start(mkey, mask);
180 	int i;
181 
182 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
183 		*lmkey++ = *lkey++ & *lmask++;
184 }
185 
fl_mask_fits_tmplt(struct fl_flow_tmplt * tmplt,struct fl_flow_mask * mask)186 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
187 			       struct fl_flow_mask *mask)
188 {
189 	const long *lmask = fl_key_get_start(&mask->key, mask);
190 	const long *ltmplt;
191 	int i;
192 
193 	if (!tmplt)
194 		return true;
195 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
196 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
197 		if (~*ltmplt++ & *lmask++)
198 			return false;
199 	}
200 	return true;
201 }
202 
fl_clear_masked_range(struct fl_flow_key * key,struct fl_flow_mask * mask)203 static void fl_clear_masked_range(struct fl_flow_key *key,
204 				  struct fl_flow_mask *mask)
205 {
206 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
207 }
208 
fl_range_port_dst_cmp(struct cls_fl_filter * filter,struct fl_flow_key * key,struct fl_flow_key * mkey)209 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
210 				  struct fl_flow_key *key,
211 				  struct fl_flow_key *mkey)
212 {
213 	u16 min_mask, max_mask, min_val, max_val;
214 
215 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
216 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
217 	min_val = ntohs(filter->key.tp_range.tp_min.dst);
218 	max_val = ntohs(filter->key.tp_range.tp_max.dst);
219 
220 	if (min_mask && max_mask) {
221 		if (ntohs(key->tp_range.tp.dst) < min_val ||
222 		    ntohs(key->tp_range.tp.dst) > max_val)
223 			return false;
224 
225 		/* skb does not have min and max values */
226 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
227 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
228 	}
229 	return true;
230 }
231 
fl_range_port_src_cmp(struct cls_fl_filter * filter,struct fl_flow_key * key,struct fl_flow_key * mkey)232 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
233 				  struct fl_flow_key *key,
234 				  struct fl_flow_key *mkey)
235 {
236 	u16 min_mask, max_mask, min_val, max_val;
237 
238 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
239 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
240 	min_val = ntohs(filter->key.tp_range.tp_min.src);
241 	max_val = ntohs(filter->key.tp_range.tp_max.src);
242 
243 	if (min_mask && max_mask) {
244 		if (ntohs(key->tp_range.tp.src) < min_val ||
245 		    ntohs(key->tp_range.tp.src) > max_val)
246 			return false;
247 
248 		/* skb does not have min and max values */
249 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
250 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
251 	}
252 	return true;
253 }
254 
__fl_lookup(struct fl_flow_mask * mask,struct fl_flow_key * mkey)255 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
256 					 struct fl_flow_key *mkey)
257 {
258 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
259 				      mask->filter_ht_params);
260 }
261 
fl_lookup_range(struct fl_flow_mask * mask,struct fl_flow_key * mkey,struct fl_flow_key * key)262 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
263 					     struct fl_flow_key *mkey,
264 					     struct fl_flow_key *key)
265 {
266 	struct cls_fl_filter *filter, *f;
267 
268 	list_for_each_entry_rcu(filter, &mask->filters, list) {
269 		if (!fl_range_port_dst_cmp(filter, key, mkey))
270 			continue;
271 
272 		if (!fl_range_port_src_cmp(filter, key, mkey))
273 			continue;
274 
275 		f = __fl_lookup(mask, mkey);
276 		if (f)
277 			return f;
278 	}
279 	return NULL;
280 }
281 
282 static noinline_for_stack
fl_mask_lookup(struct fl_flow_mask * mask,struct fl_flow_key * key)283 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
284 {
285 	struct fl_flow_key mkey;
286 
287 	fl_set_masked_key(&mkey, key, mask);
288 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
289 		return fl_lookup_range(mask, &mkey, key);
290 
291 	return __fl_lookup(mask, &mkey);
292 }
293 
294 static u16 fl_ct_info_to_flower_map[] = {
295 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
296 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
297 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
298 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
299 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
300 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
301 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
302 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
303 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
304 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
305 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
306 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
307 };
308 
fl_classify(struct sk_buff * skb,const struct tcf_proto * tp,struct tcf_result * res)309 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
310 		       struct tcf_result *res)
311 {
312 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
313 	bool post_ct = tc_skb_cb(skb)->post_ct;
314 	u16 zone = tc_skb_cb(skb)->zone;
315 	struct fl_flow_key skb_key;
316 	struct fl_flow_mask *mask;
317 	struct cls_fl_filter *f;
318 
319 	list_for_each_entry_rcu(mask, &head->masks, list) {
320 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
321 		fl_clear_masked_range(&skb_key, mask);
322 
323 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
324 		/* skb_flow_dissect() does not set n_proto in case an unknown
325 		 * protocol, so do it rather here.
326 		 */
327 		skb_key.basic.n_proto = skb_protocol(skb, false);
328 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
329 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
330 				    fl_ct_info_to_flower_map,
331 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
332 				    post_ct, zone);
333 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
334 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
335 
336 		f = fl_mask_lookup(mask, &skb_key);
337 		if (f && !tc_skip_sw(f->flags)) {
338 			*res = f->res;
339 			return tcf_exts_exec(skb, &f->exts, res);
340 		}
341 	}
342 	return -1;
343 }
344 
fl_init(struct tcf_proto * tp)345 static int fl_init(struct tcf_proto *tp)
346 {
347 	struct cls_fl_head *head;
348 
349 	head = kzalloc(sizeof(*head), GFP_KERNEL);
350 	if (!head)
351 		return -ENOBUFS;
352 
353 	spin_lock_init(&head->masks_lock);
354 	INIT_LIST_HEAD_RCU(&head->masks);
355 	INIT_LIST_HEAD(&head->hw_filters);
356 	rcu_assign_pointer(tp->root, head);
357 	idr_init(&head->handle_idr);
358 
359 	return rhashtable_init(&head->ht, &mask_ht_params);
360 }
361 
fl_mask_free(struct fl_flow_mask * mask,bool mask_init_done)362 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
363 {
364 	/* temporary masks don't have their filters list and ht initialized */
365 	if (mask_init_done) {
366 		WARN_ON(!list_empty(&mask->filters));
367 		rhashtable_destroy(&mask->ht);
368 	}
369 	kfree(mask);
370 }
371 
fl_mask_free_work(struct work_struct * work)372 static void fl_mask_free_work(struct work_struct *work)
373 {
374 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
375 						 struct fl_flow_mask, rwork);
376 
377 	fl_mask_free(mask, true);
378 }
379 
fl_uninit_mask_free_work(struct work_struct * work)380 static void fl_uninit_mask_free_work(struct work_struct *work)
381 {
382 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
383 						 struct fl_flow_mask, rwork);
384 
385 	fl_mask_free(mask, false);
386 }
387 
fl_mask_put(struct cls_fl_head * head,struct fl_flow_mask * mask)388 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
389 {
390 	if (!refcount_dec_and_test(&mask->refcnt))
391 		return false;
392 
393 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
394 
395 	spin_lock(&head->masks_lock);
396 	list_del_rcu(&mask->list);
397 	spin_unlock(&head->masks_lock);
398 
399 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
400 
401 	return true;
402 }
403 
fl_head_dereference(struct tcf_proto * tp)404 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
405 {
406 	/* Flower classifier only changes root pointer during init and destroy.
407 	 * Users must obtain reference to tcf_proto instance before calling its
408 	 * API, so tp->root pointer is protected from concurrent call to
409 	 * fl_destroy() by reference counting.
410 	 */
411 	return rcu_dereference_raw(tp->root);
412 }
413 
__fl_destroy_filter(struct cls_fl_filter * f)414 static void __fl_destroy_filter(struct cls_fl_filter *f)
415 {
416 	tcf_exts_destroy(&f->exts);
417 	tcf_exts_put_net(&f->exts);
418 	kfree(f);
419 }
420 
fl_destroy_filter_work(struct work_struct * work)421 static void fl_destroy_filter_work(struct work_struct *work)
422 {
423 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
424 					struct cls_fl_filter, rwork);
425 
426 	__fl_destroy_filter(f);
427 }
428 
fl_hw_destroy_filter(struct tcf_proto * tp,struct cls_fl_filter * f,bool rtnl_held,struct netlink_ext_ack * extack)429 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
430 				 bool rtnl_held, struct netlink_ext_ack *extack)
431 {
432 	struct tcf_block *block = tp->chain->block;
433 	struct flow_cls_offload cls_flower = {};
434 
435 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
436 	cls_flower.command = FLOW_CLS_DESTROY;
437 	cls_flower.cookie = (unsigned long) f;
438 
439 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
440 			    &f->flags, &f->in_hw_count, rtnl_held);
441 
442 }
443 
fl_hw_replace_filter(struct tcf_proto * tp,struct cls_fl_filter * f,bool rtnl_held,struct netlink_ext_ack * extack)444 static int fl_hw_replace_filter(struct tcf_proto *tp,
445 				struct cls_fl_filter *f, bool rtnl_held,
446 				struct netlink_ext_ack *extack)
447 {
448 	struct tcf_block *block = tp->chain->block;
449 	struct flow_cls_offload cls_flower = {};
450 	bool skip_sw = tc_skip_sw(f->flags);
451 	int err = 0;
452 
453 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
454 	if (!cls_flower.rule)
455 		return -ENOMEM;
456 
457 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
458 	cls_flower.command = FLOW_CLS_REPLACE;
459 	cls_flower.cookie = (unsigned long) f;
460 	cls_flower.rule->match.dissector = &f->mask->dissector;
461 	cls_flower.rule->match.mask = &f->mask->key;
462 	cls_flower.rule->match.key = &f->mkey;
463 	cls_flower.classid = f->res.classid;
464 
465 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
466 	if (err) {
467 		kfree(cls_flower.rule);
468 		if (skip_sw) {
469 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
470 			return err;
471 		}
472 		return 0;
473 	}
474 
475 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
476 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
477 	tc_cleanup_flow_action(&cls_flower.rule->action);
478 	kfree(cls_flower.rule);
479 
480 	if (err) {
481 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
482 		return err;
483 	}
484 
485 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
486 		return -EINVAL;
487 
488 	return 0;
489 }
490 
fl_hw_update_stats(struct tcf_proto * tp,struct cls_fl_filter * f,bool rtnl_held)491 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
492 			       bool rtnl_held)
493 {
494 	struct tcf_block *block = tp->chain->block;
495 	struct flow_cls_offload cls_flower = {};
496 
497 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
498 	cls_flower.command = FLOW_CLS_STATS;
499 	cls_flower.cookie = (unsigned long) f;
500 	cls_flower.classid = f->res.classid;
501 
502 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
503 			 rtnl_held);
504 
505 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
506 			      cls_flower.stats.pkts,
507 			      cls_flower.stats.drops,
508 			      cls_flower.stats.lastused,
509 			      cls_flower.stats.used_hw_stats,
510 			      cls_flower.stats.used_hw_stats_valid);
511 }
512 
__fl_put(struct cls_fl_filter * f)513 static void __fl_put(struct cls_fl_filter *f)
514 {
515 	if (!refcount_dec_and_test(&f->refcnt))
516 		return;
517 
518 	if (tcf_exts_get_net(&f->exts))
519 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
520 	else
521 		__fl_destroy_filter(f);
522 }
523 
__fl_get(struct cls_fl_head * head,u32 handle)524 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
525 {
526 	struct cls_fl_filter *f;
527 
528 	rcu_read_lock();
529 	f = idr_find(&head->handle_idr, handle);
530 	if (f && !refcount_inc_not_zero(&f->refcnt))
531 		f = NULL;
532 	rcu_read_unlock();
533 
534 	return f;
535 }
536 
__fl_delete(struct tcf_proto * tp,struct cls_fl_filter * f,bool * last,bool rtnl_held,struct netlink_ext_ack * extack)537 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
538 		       bool *last, bool rtnl_held,
539 		       struct netlink_ext_ack *extack)
540 {
541 	struct cls_fl_head *head = fl_head_dereference(tp);
542 
543 	*last = false;
544 
545 	spin_lock(&tp->lock);
546 	if (f->deleted) {
547 		spin_unlock(&tp->lock);
548 		return -ENOENT;
549 	}
550 
551 	f->deleted = true;
552 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
553 			       f->mask->filter_ht_params);
554 	idr_remove(&head->handle_idr, f->handle);
555 	list_del_rcu(&f->list);
556 	spin_unlock(&tp->lock);
557 
558 	*last = fl_mask_put(head, f->mask);
559 	if (!tc_skip_hw(f->flags))
560 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
561 	tcf_unbind_filter(tp, &f->res);
562 	__fl_put(f);
563 
564 	return 0;
565 }
566 
fl_destroy_sleepable(struct work_struct * work)567 static void fl_destroy_sleepable(struct work_struct *work)
568 {
569 	struct cls_fl_head *head = container_of(to_rcu_work(work),
570 						struct cls_fl_head,
571 						rwork);
572 
573 	rhashtable_destroy(&head->ht);
574 	kfree(head);
575 	module_put(THIS_MODULE);
576 }
577 
fl_destroy(struct tcf_proto * tp,bool rtnl_held,struct netlink_ext_ack * extack)578 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
579 		       struct netlink_ext_ack *extack)
580 {
581 	struct cls_fl_head *head = fl_head_dereference(tp);
582 	struct fl_flow_mask *mask, *next_mask;
583 	struct cls_fl_filter *f, *next;
584 	bool last;
585 
586 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
587 		list_for_each_entry_safe(f, next, &mask->filters, list) {
588 			__fl_delete(tp, f, &last, rtnl_held, extack);
589 			if (last)
590 				break;
591 		}
592 	}
593 	idr_destroy(&head->handle_idr);
594 
595 	__module_get(THIS_MODULE);
596 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
597 }
598 
fl_put(struct tcf_proto * tp,void * arg)599 static void fl_put(struct tcf_proto *tp, void *arg)
600 {
601 	struct cls_fl_filter *f = arg;
602 
603 	__fl_put(f);
604 }
605 
fl_get(struct tcf_proto * tp,u32 handle)606 static void *fl_get(struct tcf_proto *tp, u32 handle)
607 {
608 	struct cls_fl_head *head = fl_head_dereference(tp);
609 
610 	return __fl_get(head, handle);
611 }
612 
613 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
614 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
615 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
616 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
617 					    .len = IFNAMSIZ },
618 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
619 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
620 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
621 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
622 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
623 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
624 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
625 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
628 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
629 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
632 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
638 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
640 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
641 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
642 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
643 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
644 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
645 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
646 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
647 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
648 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
660 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
661 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
662 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
673 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
674 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
677 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
678 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
679 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
680 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
681 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
684 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
685 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
687 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
688 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
690 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
692 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
694 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
696 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
697 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
698 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
699 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
700 	[TCA_FLOWER_KEY_CT_STATE]	=
701 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
702 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
703 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
704 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
705 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
706 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
707 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
708 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
709 					    .len = 128 / BITS_PER_BYTE },
710 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
711 					    .len = 128 / BITS_PER_BYTE },
712 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
713 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
714 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
715 
716 };
717 
718 static const struct nla_policy
719 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
720 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
721 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
722 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
723 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
724 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
725 };
726 
727 static const struct nla_policy
728 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
729 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
730 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
731 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
732 						       .len = 128 },
733 };
734 
735 static const struct nla_policy
736 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
737 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
738 };
739 
740 static const struct nla_policy
741 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
742 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
743 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
744 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
745 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
746 };
747 
748 static const struct nla_policy
749 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
750 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
751 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
752 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
753 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
754 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
755 };
756 
fl_set_key_val(struct nlattr ** tb,void * val,int val_type,void * mask,int mask_type,int len)757 static void fl_set_key_val(struct nlattr **tb,
758 			   void *val, int val_type,
759 			   void *mask, int mask_type, int len)
760 {
761 	if (!tb[val_type])
762 		return;
763 	nla_memcpy(val, tb[val_type], len);
764 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
765 		memset(mask, 0xff, len);
766 	else
767 		nla_memcpy(mask, tb[mask_type], len);
768 }
769 
fl_set_key_port_range(struct nlattr ** tb,struct fl_flow_key * key,struct fl_flow_key * mask,struct netlink_ext_ack * extack)770 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
771 				 struct fl_flow_key *mask,
772 				 struct netlink_ext_ack *extack)
773 {
774 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
775 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
776 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
777 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
778 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
779 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
780 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
781 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
782 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
783 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
784 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
785 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
786 
787 	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
788 		NL_SET_ERR_MSG(extack,
789 			       "Both min and max destination ports must be specified");
790 		return -EINVAL;
791 	}
792 	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
793 		NL_SET_ERR_MSG(extack,
794 			       "Both min and max source ports must be specified");
795 		return -EINVAL;
796 	}
797 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
798 	    ntohs(key->tp_range.tp_max.dst) <=
799 	    ntohs(key->tp_range.tp_min.dst)) {
800 		NL_SET_ERR_MSG_ATTR(extack,
801 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
802 				    "Invalid destination port range (min must be strictly smaller than max)");
803 		return -EINVAL;
804 	}
805 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
806 	    ntohs(key->tp_range.tp_max.src) <=
807 	    ntohs(key->tp_range.tp_min.src)) {
808 		NL_SET_ERR_MSG_ATTR(extack,
809 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
810 				    "Invalid source port range (min must be strictly smaller than max)");
811 		return -EINVAL;
812 	}
813 
814 	return 0;
815 }
816 
fl_set_key_mpls_lse(const struct nlattr * nla_lse,struct flow_dissector_key_mpls * key_val,struct flow_dissector_key_mpls * key_mask,struct netlink_ext_ack * extack)817 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
818 			       struct flow_dissector_key_mpls *key_val,
819 			       struct flow_dissector_key_mpls *key_mask,
820 			       struct netlink_ext_ack *extack)
821 {
822 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
823 	struct flow_dissector_mpls_lse *lse_mask;
824 	struct flow_dissector_mpls_lse *lse_val;
825 	u8 lse_index;
826 	u8 depth;
827 	int err;
828 
829 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
830 			       mpls_stack_entry_policy, extack);
831 	if (err < 0)
832 		return err;
833 
834 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
835 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
836 		return -EINVAL;
837 	}
838 
839 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
840 
841 	/* LSE depth starts at 1, for consistency with terminology used by
842 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
843 	 */
844 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
845 		NL_SET_ERR_MSG_ATTR(extack,
846 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
847 				    "Invalid MPLS depth");
848 		return -EINVAL;
849 	}
850 	lse_index = depth - 1;
851 
852 	dissector_set_mpls_lse(key_val, lse_index);
853 	dissector_set_mpls_lse(key_mask, lse_index);
854 
855 	lse_val = &key_val->ls[lse_index];
856 	lse_mask = &key_mask->ls[lse_index];
857 
858 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
859 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
860 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
861 	}
862 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
863 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
864 
865 		if (bos & ~MPLS_BOS_MASK) {
866 			NL_SET_ERR_MSG_ATTR(extack,
867 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
868 					    "Bottom Of Stack (BOS) must be 0 or 1");
869 			return -EINVAL;
870 		}
871 		lse_val->mpls_bos = bos;
872 		lse_mask->mpls_bos = MPLS_BOS_MASK;
873 	}
874 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
875 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
876 
877 		if (tc & ~MPLS_TC_MASK) {
878 			NL_SET_ERR_MSG_ATTR(extack,
879 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
880 					    "Traffic Class (TC) must be between 0 and 7");
881 			return -EINVAL;
882 		}
883 		lse_val->mpls_tc = tc;
884 		lse_mask->mpls_tc = MPLS_TC_MASK;
885 	}
886 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
887 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
888 
889 		if (label & ~MPLS_LABEL_MASK) {
890 			NL_SET_ERR_MSG_ATTR(extack,
891 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
892 					    "Label must be between 0 and 1048575");
893 			return -EINVAL;
894 		}
895 		lse_val->mpls_label = label;
896 		lse_mask->mpls_label = MPLS_LABEL_MASK;
897 	}
898 
899 	return 0;
900 }
901 
fl_set_key_mpls_opts(const struct nlattr * nla_mpls_opts,struct flow_dissector_key_mpls * key_val,struct flow_dissector_key_mpls * key_mask,struct netlink_ext_ack * extack)902 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
903 				struct flow_dissector_key_mpls *key_val,
904 				struct flow_dissector_key_mpls *key_mask,
905 				struct netlink_ext_ack *extack)
906 {
907 	struct nlattr *nla_lse;
908 	int rem;
909 	int err;
910 
911 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
912 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
913 				    "NLA_F_NESTED is missing");
914 		return -EINVAL;
915 	}
916 
917 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
918 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
919 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
920 					    "Invalid MPLS option type");
921 			return -EINVAL;
922 		}
923 
924 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
925 		if (err < 0)
926 			return err;
927 	}
928 	if (rem) {
929 		NL_SET_ERR_MSG(extack,
930 			       "Bytes leftover after parsing MPLS options");
931 		return -EINVAL;
932 	}
933 
934 	return 0;
935 }
936 
fl_set_key_mpls(struct nlattr ** tb,struct flow_dissector_key_mpls * key_val,struct flow_dissector_key_mpls * key_mask,struct netlink_ext_ack * extack)937 static int fl_set_key_mpls(struct nlattr **tb,
938 			   struct flow_dissector_key_mpls *key_val,
939 			   struct flow_dissector_key_mpls *key_mask,
940 			   struct netlink_ext_ack *extack)
941 {
942 	struct flow_dissector_mpls_lse *lse_mask;
943 	struct flow_dissector_mpls_lse *lse_val;
944 
945 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
946 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
947 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
948 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
949 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
950 			NL_SET_ERR_MSG_ATTR(extack,
951 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
952 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
953 			return -EBADMSG;
954 		}
955 
956 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
957 					    key_val, key_mask, extack);
958 	}
959 
960 	lse_val = &key_val->ls[0];
961 	lse_mask = &key_mask->ls[0];
962 
963 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
964 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
965 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
966 		dissector_set_mpls_lse(key_val, 0);
967 		dissector_set_mpls_lse(key_mask, 0);
968 	}
969 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
970 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
971 
972 		if (bos & ~MPLS_BOS_MASK) {
973 			NL_SET_ERR_MSG_ATTR(extack,
974 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
975 					    "Bottom Of Stack (BOS) must be 0 or 1");
976 			return -EINVAL;
977 		}
978 		lse_val->mpls_bos = bos;
979 		lse_mask->mpls_bos = MPLS_BOS_MASK;
980 		dissector_set_mpls_lse(key_val, 0);
981 		dissector_set_mpls_lse(key_mask, 0);
982 	}
983 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
984 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
985 
986 		if (tc & ~MPLS_TC_MASK) {
987 			NL_SET_ERR_MSG_ATTR(extack,
988 					    tb[TCA_FLOWER_KEY_MPLS_TC],
989 					    "Traffic Class (TC) must be between 0 and 7");
990 			return -EINVAL;
991 		}
992 		lse_val->mpls_tc = tc;
993 		lse_mask->mpls_tc = MPLS_TC_MASK;
994 		dissector_set_mpls_lse(key_val, 0);
995 		dissector_set_mpls_lse(key_mask, 0);
996 	}
997 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
998 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
999 
1000 		if (label & ~MPLS_LABEL_MASK) {
1001 			NL_SET_ERR_MSG_ATTR(extack,
1002 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
1003 					    "Label must be between 0 and 1048575");
1004 			return -EINVAL;
1005 		}
1006 		lse_val->mpls_label = label;
1007 		lse_mask->mpls_label = MPLS_LABEL_MASK;
1008 		dissector_set_mpls_lse(key_val, 0);
1009 		dissector_set_mpls_lse(key_mask, 0);
1010 	}
1011 	return 0;
1012 }
1013 
fl_set_key_vlan(struct nlattr ** tb,__be16 ethertype,int vlan_id_key,int vlan_prio_key,int vlan_next_eth_type_key,struct flow_dissector_key_vlan * key_val,struct flow_dissector_key_vlan * key_mask)1014 static void fl_set_key_vlan(struct nlattr **tb,
1015 			    __be16 ethertype,
1016 			    int vlan_id_key, int vlan_prio_key,
1017 			    int vlan_next_eth_type_key,
1018 			    struct flow_dissector_key_vlan *key_val,
1019 			    struct flow_dissector_key_vlan *key_mask)
1020 {
1021 #define VLAN_PRIORITY_MASK	0x7
1022 
1023 	if (tb[vlan_id_key]) {
1024 		key_val->vlan_id =
1025 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1026 		key_mask->vlan_id = VLAN_VID_MASK;
1027 	}
1028 	if (tb[vlan_prio_key]) {
1029 		key_val->vlan_priority =
1030 			nla_get_u8(tb[vlan_prio_key]) &
1031 			VLAN_PRIORITY_MASK;
1032 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1033 	}
1034 	key_val->vlan_tpid = ethertype;
1035 	key_mask->vlan_tpid = cpu_to_be16(~0);
1036 	if (tb[vlan_next_eth_type_key]) {
1037 		key_val->vlan_eth_type =
1038 			nla_get_be16(tb[vlan_next_eth_type_key]);
1039 		key_mask->vlan_eth_type = cpu_to_be16(~0);
1040 	}
1041 }
1042 
fl_set_key_flag(u32 flower_key,u32 flower_mask,u32 * dissector_key,u32 * dissector_mask,u32 flower_flag_bit,u32 dissector_flag_bit)1043 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1044 			    u32 *dissector_key, u32 *dissector_mask,
1045 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1046 {
1047 	if (flower_mask & flower_flag_bit) {
1048 		*dissector_mask |= dissector_flag_bit;
1049 		if (flower_key & flower_flag_bit)
1050 			*dissector_key |= dissector_flag_bit;
1051 	}
1052 }
1053 
fl_set_key_flags(struct nlattr ** tb,u32 * flags_key,u32 * flags_mask,struct netlink_ext_ack * extack)1054 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1055 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1056 {
1057 	u32 key, mask;
1058 
1059 	/* mask is mandatory for flags */
1060 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1061 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1062 		return -EINVAL;
1063 	}
1064 
1065 	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1066 	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1067 
1068 	*flags_key  = 0;
1069 	*flags_mask = 0;
1070 
1071 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1072 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1073 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1074 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1075 			FLOW_DIS_FIRST_FRAG);
1076 
1077 	return 0;
1078 }
1079 
fl_set_key_ip(struct nlattr ** tb,bool encap,struct flow_dissector_key_ip * key,struct flow_dissector_key_ip * mask)1080 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1081 			  struct flow_dissector_key_ip *key,
1082 			  struct flow_dissector_key_ip *mask)
1083 {
1084 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1085 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1086 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1087 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1088 
1089 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1090 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1091 }
1092 
fl_set_geneve_opt(const struct nlattr * nla,struct fl_flow_key * key,int depth,int option_len,struct netlink_ext_ack * extack)1093 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1094 			     int depth, int option_len,
1095 			     struct netlink_ext_ack *extack)
1096 {
1097 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1098 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1099 	struct geneve_opt *opt;
1100 	int err, data_len = 0;
1101 
1102 	if (option_len > sizeof(struct geneve_opt))
1103 		data_len = option_len - sizeof(struct geneve_opt);
1104 
1105 	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
1106 		return -ERANGE;
1107 
1108 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1109 	memset(opt, 0xff, option_len);
1110 	opt->length = data_len / 4;
1111 	opt->r1 = 0;
1112 	opt->r2 = 0;
1113 	opt->r3 = 0;
1114 
1115 	/* If no mask has been prodived we assume an exact match. */
1116 	if (!depth)
1117 		return sizeof(struct geneve_opt) + data_len;
1118 
1119 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1120 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1121 		return -EINVAL;
1122 	}
1123 
1124 	err = nla_parse_nested_deprecated(tb,
1125 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1126 					  nla, geneve_opt_policy, extack);
1127 	if (err < 0)
1128 		return err;
1129 
1130 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1131 	 * fields from the key.
1132 	 */
1133 	if (!option_len &&
1134 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1135 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1136 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1137 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1138 		return -EINVAL;
1139 	}
1140 
1141 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1142 	 * for the mask.
1143 	 */
1144 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1145 		int new_len = key->enc_opts.len;
1146 
1147 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1148 		data_len = nla_len(data);
1149 		if (data_len < 4) {
1150 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1151 			return -ERANGE;
1152 		}
1153 		if (data_len % 4) {
1154 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1155 			return -ERANGE;
1156 		}
1157 
1158 		new_len += sizeof(struct geneve_opt) + data_len;
1159 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1160 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1161 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1162 			return -ERANGE;
1163 		}
1164 		opt->length = data_len / 4;
1165 		memcpy(opt->opt_data, nla_data(data), data_len);
1166 	}
1167 
1168 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1169 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1170 		opt->opt_class = nla_get_be16(class);
1171 	}
1172 
1173 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1174 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1175 		opt->type = nla_get_u8(type);
1176 	}
1177 
1178 	return sizeof(struct geneve_opt) + data_len;
1179 }
1180 
fl_set_vxlan_opt(const struct nlattr * nla,struct fl_flow_key * key,int depth,int option_len,struct netlink_ext_ack * extack)1181 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1182 			    int depth, int option_len,
1183 			    struct netlink_ext_ack *extack)
1184 {
1185 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1186 	struct vxlan_metadata *md;
1187 	int err;
1188 
1189 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1190 	memset(md, 0xff, sizeof(*md));
1191 
1192 	if (!depth)
1193 		return sizeof(*md);
1194 
1195 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1196 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1197 		return -EINVAL;
1198 	}
1199 
1200 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1201 			       vxlan_opt_policy, extack);
1202 	if (err < 0)
1203 		return err;
1204 
1205 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1206 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1207 		return -EINVAL;
1208 	}
1209 
1210 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1211 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1212 		md->gbp &= VXLAN_GBP_MASK;
1213 	}
1214 
1215 	return sizeof(*md);
1216 }
1217 
fl_set_erspan_opt(const struct nlattr * nla,struct fl_flow_key * key,int depth,int option_len,struct netlink_ext_ack * extack)1218 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1219 			     int depth, int option_len,
1220 			     struct netlink_ext_ack *extack)
1221 {
1222 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1223 	struct erspan_metadata *md;
1224 	int err;
1225 
1226 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1227 	memset(md, 0xff, sizeof(*md));
1228 	md->version = 1;
1229 
1230 	if (!depth)
1231 		return sizeof(*md);
1232 
1233 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1234 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1235 		return -EINVAL;
1236 	}
1237 
1238 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1239 			       erspan_opt_policy, extack);
1240 	if (err < 0)
1241 		return err;
1242 
1243 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1244 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1245 		return -EINVAL;
1246 	}
1247 
1248 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1249 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1250 
1251 	if (md->version == 1) {
1252 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1253 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1254 			return -EINVAL;
1255 		}
1256 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1257 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1258 			memset(&md->u, 0x00, sizeof(md->u));
1259 			md->u.index = nla_get_be32(nla);
1260 		}
1261 	} else if (md->version == 2) {
1262 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1263 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1264 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1265 			return -EINVAL;
1266 		}
1267 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1268 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1269 			md->u.md2.dir = nla_get_u8(nla);
1270 		}
1271 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1272 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1273 			set_hwid(&md->u.md2, nla_get_u8(nla));
1274 		}
1275 	} else {
1276 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1277 		return -EINVAL;
1278 	}
1279 
1280 	return sizeof(*md);
1281 }
1282 
fl_set_enc_opt(struct nlattr ** tb,struct fl_flow_key * key,struct fl_flow_key * mask,struct netlink_ext_ack * extack)1283 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1284 			  struct fl_flow_key *mask,
1285 			  struct netlink_ext_ack *extack)
1286 {
1287 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1288 	int err, option_len, key_depth, msk_depth = 0;
1289 
1290 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1291 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1292 					     enc_opts_policy, extack);
1293 	if (err)
1294 		return err;
1295 
1296 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1297 
1298 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1299 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1300 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1301 						     enc_opts_policy, extack);
1302 		if (err)
1303 			return err;
1304 
1305 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1306 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1307 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1308 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1309 			return -EINVAL;
1310 		}
1311 	}
1312 
1313 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1314 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1315 		switch (nla_type(nla_opt_key)) {
1316 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1317 			if (key->enc_opts.dst_opt_type &&
1318 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1319 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1320 				return -EINVAL;
1321 			}
1322 			option_len = 0;
1323 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1324 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1325 						       key_depth, option_len,
1326 						       extack);
1327 			if (option_len < 0)
1328 				return option_len;
1329 
1330 			key->enc_opts.len += option_len;
1331 			/* At the same time we need to parse through the mask
1332 			 * in order to verify exact and mask attribute lengths.
1333 			 */
1334 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1335 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1336 						       msk_depth, option_len,
1337 						       extack);
1338 			if (option_len < 0)
1339 				return option_len;
1340 
1341 			mask->enc_opts.len += option_len;
1342 			if (key->enc_opts.len != mask->enc_opts.len) {
1343 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1344 				return -EINVAL;
1345 			}
1346 			break;
1347 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1348 			if (key->enc_opts.dst_opt_type) {
1349 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1350 				return -EINVAL;
1351 			}
1352 			option_len = 0;
1353 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1354 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1355 						      key_depth, option_len,
1356 						      extack);
1357 			if (option_len < 0)
1358 				return option_len;
1359 
1360 			key->enc_opts.len += option_len;
1361 			/* At the same time we need to parse through the mask
1362 			 * in order to verify exact and mask attribute lengths.
1363 			 */
1364 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1365 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1366 						      msk_depth, option_len,
1367 						      extack);
1368 			if (option_len < 0)
1369 				return option_len;
1370 
1371 			mask->enc_opts.len += option_len;
1372 			if (key->enc_opts.len != mask->enc_opts.len) {
1373 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1374 				return -EINVAL;
1375 			}
1376 			break;
1377 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1378 			if (key->enc_opts.dst_opt_type) {
1379 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1380 				return -EINVAL;
1381 			}
1382 			option_len = 0;
1383 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1384 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1385 						       key_depth, option_len,
1386 						       extack);
1387 			if (option_len < 0)
1388 				return option_len;
1389 
1390 			key->enc_opts.len += option_len;
1391 			/* At the same time we need to parse through the mask
1392 			 * in order to verify exact and mask attribute lengths.
1393 			 */
1394 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1395 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1396 						       msk_depth, option_len,
1397 						       extack);
1398 			if (option_len < 0)
1399 				return option_len;
1400 
1401 			mask->enc_opts.len += option_len;
1402 			if (key->enc_opts.len != mask->enc_opts.len) {
1403 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1404 				return -EINVAL;
1405 			}
1406 			break;
1407 		default:
1408 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1409 			return -EINVAL;
1410 		}
1411 
1412 		if (!msk_depth)
1413 			continue;
1414 
1415 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1416 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1417 			return -EINVAL;
1418 		}
1419 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1420 	}
1421 
1422 	return 0;
1423 }
1424 
fl_validate_ct_state(u16 state,struct nlattr * tb,struct netlink_ext_ack * extack)1425 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1426 				struct netlink_ext_ack *extack)
1427 {
1428 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1429 		NL_SET_ERR_MSG_ATTR(extack, tb,
1430 				    "no trk, so no other flag can be set");
1431 		return -EINVAL;
1432 	}
1433 
1434 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1435 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1436 		NL_SET_ERR_MSG_ATTR(extack, tb,
1437 				    "new and est are mutually exclusive");
1438 		return -EINVAL;
1439 	}
1440 
1441 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1442 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1443 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1444 		NL_SET_ERR_MSG_ATTR(extack, tb,
1445 				    "when inv is set, only trk may be set");
1446 		return -EINVAL;
1447 	}
1448 
1449 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1450 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1451 		NL_SET_ERR_MSG_ATTR(extack, tb,
1452 				    "new and rpl are mutually exclusive");
1453 		return -EINVAL;
1454 	}
1455 
1456 	return 0;
1457 }
1458 
fl_set_key_ct(struct nlattr ** tb,struct flow_dissector_key_ct * key,struct flow_dissector_key_ct * mask,struct netlink_ext_ack * extack)1459 static int fl_set_key_ct(struct nlattr **tb,
1460 			 struct flow_dissector_key_ct *key,
1461 			 struct flow_dissector_key_ct *mask,
1462 			 struct netlink_ext_ack *extack)
1463 {
1464 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1465 		int err;
1466 
1467 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1468 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1469 			return -EOPNOTSUPP;
1470 		}
1471 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1472 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1473 			       sizeof(key->ct_state));
1474 
1475 		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1476 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1477 					   extack);
1478 		if (err)
1479 			return err;
1480 
1481 	}
1482 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1483 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1484 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1485 			return -EOPNOTSUPP;
1486 		}
1487 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1488 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1489 			       sizeof(key->ct_zone));
1490 	}
1491 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1492 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1493 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1494 			return -EOPNOTSUPP;
1495 		}
1496 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1497 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1498 			       sizeof(key->ct_mark));
1499 	}
1500 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1501 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1502 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1503 			return -EOPNOTSUPP;
1504 		}
1505 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1506 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1507 			       sizeof(key->ct_labels));
1508 	}
1509 
1510 	return 0;
1511 }
1512 
fl_set_key(struct net * net,struct nlattr ** tb,struct fl_flow_key * key,struct fl_flow_key * mask,struct netlink_ext_ack * extack)1513 static int fl_set_key(struct net *net, struct nlattr **tb,
1514 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1515 		      struct netlink_ext_ack *extack)
1516 {
1517 	__be16 ethertype;
1518 	int ret = 0;
1519 
1520 	if (tb[TCA_FLOWER_INDEV]) {
1521 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1522 		if (err < 0)
1523 			return err;
1524 		key->meta.ingress_ifindex = err;
1525 		mask->meta.ingress_ifindex = 0xffffffff;
1526 	}
1527 
1528 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1529 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1530 		       sizeof(key->eth.dst));
1531 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1532 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1533 		       sizeof(key->eth.src));
1534 
1535 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1536 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1537 
1538 		if (eth_type_vlan(ethertype)) {
1539 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1540 					TCA_FLOWER_KEY_VLAN_PRIO,
1541 					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1542 					&key->vlan, &mask->vlan);
1543 
1544 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1545 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1546 				if (eth_type_vlan(ethertype)) {
1547 					fl_set_key_vlan(tb, ethertype,
1548 							TCA_FLOWER_KEY_CVLAN_ID,
1549 							TCA_FLOWER_KEY_CVLAN_PRIO,
1550 							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1551 							&key->cvlan, &mask->cvlan);
1552 					fl_set_key_val(tb, &key->basic.n_proto,
1553 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1554 						       &mask->basic.n_proto,
1555 						       TCA_FLOWER_UNSPEC,
1556 						       sizeof(key->basic.n_proto));
1557 				} else {
1558 					key->basic.n_proto = ethertype;
1559 					mask->basic.n_proto = cpu_to_be16(~0);
1560 				}
1561 			}
1562 		} else {
1563 			key->basic.n_proto = ethertype;
1564 			mask->basic.n_proto = cpu_to_be16(~0);
1565 		}
1566 	}
1567 
1568 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1569 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1570 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1571 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1572 			       sizeof(key->basic.ip_proto));
1573 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1574 	}
1575 
1576 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1577 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1578 		mask->control.addr_type = ~0;
1579 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1580 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1581 			       sizeof(key->ipv4.src));
1582 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1583 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1584 			       sizeof(key->ipv4.dst));
1585 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1586 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1587 		mask->control.addr_type = ~0;
1588 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1589 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1590 			       sizeof(key->ipv6.src));
1591 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1592 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1593 			       sizeof(key->ipv6.dst));
1594 	}
1595 
1596 	if (key->basic.ip_proto == IPPROTO_TCP) {
1597 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1598 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1599 			       sizeof(key->tp.src));
1600 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1601 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1602 			       sizeof(key->tp.dst));
1603 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1604 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1605 			       sizeof(key->tcp.flags));
1606 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1607 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1608 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1609 			       sizeof(key->tp.src));
1610 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1611 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1612 			       sizeof(key->tp.dst));
1613 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1614 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1615 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1616 			       sizeof(key->tp.src));
1617 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1618 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1619 			       sizeof(key->tp.dst));
1620 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1621 		   key->basic.ip_proto == IPPROTO_ICMP) {
1622 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1623 			       &mask->icmp.type,
1624 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1625 			       sizeof(key->icmp.type));
1626 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1627 			       &mask->icmp.code,
1628 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1629 			       sizeof(key->icmp.code));
1630 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1631 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1632 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1633 			       &mask->icmp.type,
1634 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1635 			       sizeof(key->icmp.type));
1636 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1637 			       &mask->icmp.code,
1638 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1639 			       sizeof(key->icmp.code));
1640 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1641 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1642 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1643 		if (ret)
1644 			return ret;
1645 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1646 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1647 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1648 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1649 			       sizeof(key->arp.sip));
1650 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1651 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1652 			       sizeof(key->arp.tip));
1653 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1654 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1655 			       sizeof(key->arp.op));
1656 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1657 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1658 			       sizeof(key->arp.sha));
1659 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1660 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1661 			       sizeof(key->arp.tha));
1662 	}
1663 
1664 	if (key->basic.ip_proto == IPPROTO_TCP ||
1665 	    key->basic.ip_proto == IPPROTO_UDP ||
1666 	    key->basic.ip_proto == IPPROTO_SCTP) {
1667 		ret = fl_set_key_port_range(tb, key, mask, extack);
1668 		if (ret)
1669 			return ret;
1670 	}
1671 
1672 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1673 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1674 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1675 		mask->enc_control.addr_type = ~0;
1676 		fl_set_key_val(tb, &key->enc_ipv4.src,
1677 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1678 			       &mask->enc_ipv4.src,
1679 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1680 			       sizeof(key->enc_ipv4.src));
1681 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1682 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1683 			       &mask->enc_ipv4.dst,
1684 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1685 			       sizeof(key->enc_ipv4.dst));
1686 	}
1687 
1688 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1689 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1690 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1691 		mask->enc_control.addr_type = ~0;
1692 		fl_set_key_val(tb, &key->enc_ipv6.src,
1693 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1694 			       &mask->enc_ipv6.src,
1695 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1696 			       sizeof(key->enc_ipv6.src));
1697 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1698 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1699 			       &mask->enc_ipv6.dst,
1700 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1701 			       sizeof(key->enc_ipv6.dst));
1702 	}
1703 
1704 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1705 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1706 		       sizeof(key->enc_key_id.keyid));
1707 
1708 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1709 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1710 		       sizeof(key->enc_tp.src));
1711 
1712 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1713 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1714 		       sizeof(key->enc_tp.dst));
1715 
1716 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1717 
1718 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1719 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1720 		       sizeof(key->hash.hash));
1721 
1722 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1723 		ret = fl_set_enc_opt(tb, key, mask, extack);
1724 		if (ret)
1725 			return ret;
1726 	}
1727 
1728 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1729 	if (ret)
1730 		return ret;
1731 
1732 	if (tb[TCA_FLOWER_KEY_FLAGS])
1733 		ret = fl_set_key_flags(tb, &key->control.flags,
1734 				       &mask->control.flags, extack);
1735 
1736 	return ret;
1737 }
1738 
fl_mask_copy(struct fl_flow_mask * dst,struct fl_flow_mask * src)1739 static void fl_mask_copy(struct fl_flow_mask *dst,
1740 			 struct fl_flow_mask *src)
1741 {
1742 	const void *psrc = fl_key_get_start(&src->key, src);
1743 	void *pdst = fl_key_get_start(&dst->key, src);
1744 
1745 	memcpy(pdst, psrc, fl_mask_range(src));
1746 	dst->range = src->range;
1747 }
1748 
1749 static const struct rhashtable_params fl_ht_params = {
1750 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1751 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1752 	.automatic_shrinking = true,
1753 };
1754 
fl_init_mask_hashtable(struct fl_flow_mask * mask)1755 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1756 {
1757 	mask->filter_ht_params = fl_ht_params;
1758 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1759 	mask->filter_ht_params.key_offset += mask->range.start;
1760 
1761 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1762 }
1763 
1764 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1765 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1766 
1767 #define FL_KEY_IS_MASKED(mask, member)						\
1768 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1769 		   0, FL_KEY_MEMBER_SIZE(member))				\
1770 
1771 #define FL_KEY_SET(keys, cnt, id, member)					\
1772 	do {									\
1773 		keys[cnt].key_id = id;						\
1774 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1775 		cnt++;								\
1776 	} while(0);
1777 
1778 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1779 	do {									\
1780 		if (FL_KEY_IS_MASKED(mask, member))				\
1781 			FL_KEY_SET(keys, cnt, id, member);			\
1782 	} while(0);
1783 
fl_init_dissector(struct flow_dissector * dissector,struct fl_flow_key * mask)1784 static void fl_init_dissector(struct flow_dissector *dissector,
1785 			      struct fl_flow_key *mask)
1786 {
1787 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1788 	size_t cnt = 0;
1789 
1790 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1791 			     FLOW_DISSECTOR_KEY_META, meta);
1792 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1793 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1794 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1795 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1796 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1797 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1798 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1799 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1800 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1801 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1802 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1803 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1804 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1805 			     FLOW_DISSECTOR_KEY_IP, ip);
1806 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1807 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1808 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1809 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1810 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1811 			     FLOW_DISSECTOR_KEY_ARP, arp);
1812 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1813 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1814 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1815 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1816 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1817 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1818 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1819 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1820 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1821 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1822 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1823 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1824 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1825 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1826 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1827 			   enc_control);
1828 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1829 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1830 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1831 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1832 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1833 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1834 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1835 			     FLOW_DISSECTOR_KEY_CT, ct);
1836 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1837 			     FLOW_DISSECTOR_KEY_HASH, hash);
1838 
1839 	skb_flow_dissector_init(dissector, keys, cnt);
1840 }
1841 
fl_create_new_mask(struct cls_fl_head * head,struct fl_flow_mask * mask)1842 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1843 					       struct fl_flow_mask *mask)
1844 {
1845 	struct fl_flow_mask *newmask;
1846 	int err;
1847 
1848 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1849 	if (!newmask)
1850 		return ERR_PTR(-ENOMEM);
1851 
1852 	fl_mask_copy(newmask, mask);
1853 
1854 	if ((newmask->key.tp_range.tp_min.dst &&
1855 	     newmask->key.tp_range.tp_max.dst) ||
1856 	    (newmask->key.tp_range.tp_min.src &&
1857 	     newmask->key.tp_range.tp_max.src))
1858 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1859 
1860 	err = fl_init_mask_hashtable(newmask);
1861 	if (err)
1862 		goto errout_free;
1863 
1864 	fl_init_dissector(&newmask->dissector, &newmask->key);
1865 
1866 	INIT_LIST_HEAD_RCU(&newmask->filters);
1867 
1868 	refcount_set(&newmask->refcnt, 1);
1869 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1870 				      &newmask->ht_node, mask_ht_params);
1871 	if (err)
1872 		goto errout_destroy;
1873 
1874 	spin_lock(&head->masks_lock);
1875 	list_add_tail_rcu(&newmask->list, &head->masks);
1876 	spin_unlock(&head->masks_lock);
1877 
1878 	return newmask;
1879 
1880 errout_destroy:
1881 	rhashtable_destroy(&newmask->ht);
1882 errout_free:
1883 	kfree(newmask);
1884 
1885 	return ERR_PTR(err);
1886 }
1887 
fl_check_assign_mask(struct cls_fl_head * head,struct cls_fl_filter * fnew,struct cls_fl_filter * fold,struct fl_flow_mask * mask)1888 static int fl_check_assign_mask(struct cls_fl_head *head,
1889 				struct cls_fl_filter *fnew,
1890 				struct cls_fl_filter *fold,
1891 				struct fl_flow_mask *mask)
1892 {
1893 	struct fl_flow_mask *newmask;
1894 	int ret = 0;
1895 
1896 	rcu_read_lock();
1897 
1898 	/* Insert mask as temporary node to prevent concurrent creation of mask
1899 	 * with same key. Any concurrent lookups with same key will return
1900 	 * -EAGAIN because mask's refcnt is zero.
1901 	 */
1902 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1903 						       &mask->ht_node,
1904 						       mask_ht_params);
1905 	if (!fnew->mask) {
1906 		rcu_read_unlock();
1907 
1908 		if (fold) {
1909 			ret = -EINVAL;
1910 			goto errout_cleanup;
1911 		}
1912 
1913 		newmask = fl_create_new_mask(head, mask);
1914 		if (IS_ERR(newmask)) {
1915 			ret = PTR_ERR(newmask);
1916 			goto errout_cleanup;
1917 		}
1918 
1919 		fnew->mask = newmask;
1920 		return 0;
1921 	} else if (IS_ERR(fnew->mask)) {
1922 		ret = PTR_ERR(fnew->mask);
1923 	} else if (fold && fold->mask != fnew->mask) {
1924 		ret = -EINVAL;
1925 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1926 		/* Mask was deleted concurrently, try again */
1927 		ret = -EAGAIN;
1928 	}
1929 	rcu_read_unlock();
1930 	return ret;
1931 
1932 errout_cleanup:
1933 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1934 			       mask_ht_params);
1935 	return ret;
1936 }
1937 
fl_set_parms(struct net * net,struct tcf_proto * tp,struct cls_fl_filter * f,struct fl_flow_mask * mask,unsigned long base,struct nlattr ** tb,struct nlattr * est,struct fl_flow_tmplt * tmplt,u32 flags,struct netlink_ext_ack * extack)1938 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1939 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1940 			unsigned long base, struct nlattr **tb,
1941 			struct nlattr *est,
1942 			struct fl_flow_tmplt *tmplt, u32 flags,
1943 			struct netlink_ext_ack *extack)
1944 {
1945 	int err;
1946 
1947 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
1948 	if (err < 0)
1949 		return err;
1950 
1951 	if (tb[TCA_FLOWER_CLASSID]) {
1952 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1953 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
1954 			rtnl_lock();
1955 		tcf_bind_filter(tp, &f->res, base);
1956 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
1957 			rtnl_unlock();
1958 	}
1959 
1960 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1961 	if (err)
1962 		return err;
1963 
1964 	fl_mask_update_range(mask);
1965 	fl_set_masked_key(&f->mkey, &f->key, mask);
1966 
1967 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1968 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1969 		return -EINVAL;
1970 	}
1971 
1972 	return 0;
1973 }
1974 
fl_ht_insert_unique(struct cls_fl_filter * fnew,struct cls_fl_filter * fold,bool * in_ht)1975 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1976 			       struct cls_fl_filter *fold,
1977 			       bool *in_ht)
1978 {
1979 	struct fl_flow_mask *mask = fnew->mask;
1980 	int err;
1981 
1982 	err = rhashtable_lookup_insert_fast(&mask->ht,
1983 					    &fnew->ht_node,
1984 					    mask->filter_ht_params);
1985 	if (err) {
1986 		*in_ht = false;
1987 		/* It is okay if filter with same key exists when
1988 		 * overwriting.
1989 		 */
1990 		return fold && err == -EEXIST ? 0 : err;
1991 	}
1992 
1993 	*in_ht = true;
1994 	return 0;
1995 }
1996 
fl_change(struct net * net,struct sk_buff * in_skb,struct tcf_proto * tp,unsigned long base,u32 handle,struct nlattr ** tca,void ** arg,u32 flags,struct netlink_ext_ack * extack)1997 static int fl_change(struct net *net, struct sk_buff *in_skb,
1998 		     struct tcf_proto *tp, unsigned long base,
1999 		     u32 handle, struct nlattr **tca,
2000 		     void **arg, u32 flags,
2001 		     struct netlink_ext_ack *extack)
2002 {
2003 	struct cls_fl_head *head = fl_head_dereference(tp);
2004 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2005 	struct cls_fl_filter *fold = *arg;
2006 	struct cls_fl_filter *fnew;
2007 	struct fl_flow_mask *mask;
2008 	struct nlattr **tb;
2009 	bool in_ht;
2010 	int err;
2011 
2012 	if (!tca[TCA_OPTIONS]) {
2013 		err = -EINVAL;
2014 		goto errout_fold;
2015 	}
2016 
2017 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2018 	if (!mask) {
2019 		err = -ENOBUFS;
2020 		goto errout_fold;
2021 	}
2022 
2023 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2024 	if (!tb) {
2025 		err = -ENOBUFS;
2026 		goto errout_mask_alloc;
2027 	}
2028 
2029 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2030 					  tca[TCA_OPTIONS], fl_policy, NULL);
2031 	if (err < 0)
2032 		goto errout_tb;
2033 
2034 	if (fold && handle && fold->handle != handle) {
2035 		err = -EINVAL;
2036 		goto errout_tb;
2037 	}
2038 
2039 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2040 	if (!fnew) {
2041 		err = -ENOBUFS;
2042 		goto errout_tb;
2043 	}
2044 	INIT_LIST_HEAD(&fnew->hw_list);
2045 	refcount_set(&fnew->refcnt, 1);
2046 
2047 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2048 	if (err < 0)
2049 		goto errout;
2050 
2051 	if (tb[TCA_FLOWER_FLAGS]) {
2052 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2053 
2054 		if (!tc_flags_valid(fnew->flags)) {
2055 			err = -EINVAL;
2056 			goto errout;
2057 		}
2058 	}
2059 
2060 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2061 			   tp->chain->tmplt_priv, flags, extack);
2062 	if (err)
2063 		goto errout;
2064 
2065 	err = fl_check_assign_mask(head, fnew, fold, mask);
2066 	if (err)
2067 		goto errout;
2068 
2069 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2070 	if (err)
2071 		goto errout_mask;
2072 
2073 	if (!tc_skip_hw(fnew->flags)) {
2074 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2075 		if (err)
2076 			goto errout_ht;
2077 	}
2078 
2079 	if (!tc_in_hw(fnew->flags))
2080 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2081 
2082 	spin_lock(&tp->lock);
2083 
2084 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
2085 	 * proto again or create new one, if necessary.
2086 	 */
2087 	if (tp->deleting) {
2088 		err = -EAGAIN;
2089 		goto errout_hw;
2090 	}
2091 
2092 	if (fold) {
2093 		/* Fold filter was deleted concurrently. Retry lookup. */
2094 		if (fold->deleted) {
2095 			err = -EAGAIN;
2096 			goto errout_hw;
2097 		}
2098 
2099 		fnew->handle = handle;
2100 
2101 		if (!in_ht) {
2102 			struct rhashtable_params params =
2103 				fnew->mask->filter_ht_params;
2104 
2105 			err = rhashtable_insert_fast(&fnew->mask->ht,
2106 						     &fnew->ht_node,
2107 						     params);
2108 			if (err)
2109 				goto errout_hw;
2110 			in_ht = true;
2111 		}
2112 
2113 		refcount_inc(&fnew->refcnt);
2114 		rhashtable_remove_fast(&fold->mask->ht,
2115 				       &fold->ht_node,
2116 				       fold->mask->filter_ht_params);
2117 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2118 		list_replace_rcu(&fold->list, &fnew->list);
2119 		fold->deleted = true;
2120 
2121 		spin_unlock(&tp->lock);
2122 
2123 		fl_mask_put(head, fold->mask);
2124 		if (!tc_skip_hw(fold->flags))
2125 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2126 		tcf_unbind_filter(tp, &fold->res);
2127 		/* Caller holds reference to fold, so refcnt is always > 0
2128 		 * after this.
2129 		 */
2130 		refcount_dec(&fold->refcnt);
2131 		__fl_put(fold);
2132 	} else {
2133 		if (handle) {
2134 			/* user specifies a handle and it doesn't exist */
2135 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2136 					    handle, GFP_ATOMIC);
2137 
2138 			/* Filter with specified handle was concurrently
2139 			 * inserted after initial check in cls_api. This is not
2140 			 * necessarily an error if NLM_F_EXCL is not set in
2141 			 * message flags. Returning EAGAIN will cause cls_api to
2142 			 * try to update concurrently inserted rule.
2143 			 */
2144 			if (err == -ENOSPC)
2145 				err = -EAGAIN;
2146 		} else {
2147 			handle = 1;
2148 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2149 					    INT_MAX, GFP_ATOMIC);
2150 		}
2151 		if (err)
2152 			goto errout_hw;
2153 
2154 		refcount_inc(&fnew->refcnt);
2155 		fnew->handle = handle;
2156 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2157 		spin_unlock(&tp->lock);
2158 	}
2159 
2160 	*arg = fnew;
2161 
2162 	kfree(tb);
2163 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2164 	return 0;
2165 
2166 errout_ht:
2167 	spin_lock(&tp->lock);
2168 errout_hw:
2169 	fnew->deleted = true;
2170 	spin_unlock(&tp->lock);
2171 	if (!tc_skip_hw(fnew->flags))
2172 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2173 	if (in_ht)
2174 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2175 				       fnew->mask->filter_ht_params);
2176 errout_mask:
2177 	fl_mask_put(head, fnew->mask);
2178 errout:
2179 	__fl_put(fnew);
2180 errout_tb:
2181 	kfree(tb);
2182 errout_mask_alloc:
2183 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2184 errout_fold:
2185 	if (fold)
2186 		__fl_put(fold);
2187 	return err;
2188 }
2189 
fl_delete(struct tcf_proto * tp,void * arg,bool * last,bool rtnl_held,struct netlink_ext_ack * extack)2190 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2191 		     bool rtnl_held, struct netlink_ext_ack *extack)
2192 {
2193 	struct cls_fl_head *head = fl_head_dereference(tp);
2194 	struct cls_fl_filter *f = arg;
2195 	bool last_on_mask;
2196 	int err = 0;
2197 
2198 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2199 	*last = list_empty(&head->masks);
2200 	__fl_put(f);
2201 
2202 	return err;
2203 }
2204 
fl_walk(struct tcf_proto * tp,struct tcf_walker * arg,bool rtnl_held)2205 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2206 		    bool rtnl_held)
2207 {
2208 	struct cls_fl_head *head = fl_head_dereference(tp);
2209 	unsigned long id = arg->cookie, tmp;
2210 	struct cls_fl_filter *f;
2211 
2212 	arg->count = arg->skip;
2213 
2214 	rcu_read_lock();
2215 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2216 		/* don't return filters that are being deleted */
2217 		if (!refcount_inc_not_zero(&f->refcnt))
2218 			continue;
2219 		rcu_read_unlock();
2220 
2221 		if (arg->fn(tp, f, arg) < 0) {
2222 			__fl_put(f);
2223 			arg->stop = 1;
2224 			rcu_read_lock();
2225 			break;
2226 		}
2227 		__fl_put(f);
2228 		arg->count++;
2229 		rcu_read_lock();
2230 	}
2231 	rcu_read_unlock();
2232 	arg->cookie = id;
2233 }
2234 
2235 static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto * tp,struct cls_fl_filter * f,bool add)2236 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2237 {
2238 	struct cls_fl_head *head = fl_head_dereference(tp);
2239 
2240 	spin_lock(&tp->lock);
2241 	if (list_empty(&head->hw_filters)) {
2242 		spin_unlock(&tp->lock);
2243 		return NULL;
2244 	}
2245 
2246 	if (!f)
2247 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2248 			       hw_list);
2249 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2250 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2251 			spin_unlock(&tp->lock);
2252 			return f;
2253 		}
2254 	}
2255 
2256 	spin_unlock(&tp->lock);
2257 	return NULL;
2258 }
2259 
fl_reoffload(struct tcf_proto * tp,bool add,flow_setup_cb_t * cb,void * cb_priv,struct netlink_ext_ack * extack)2260 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2261 			void *cb_priv, struct netlink_ext_ack *extack)
2262 {
2263 	struct tcf_block *block = tp->chain->block;
2264 	struct flow_cls_offload cls_flower = {};
2265 	struct cls_fl_filter *f = NULL;
2266 	int err;
2267 
2268 	/* hw_filters list can only be changed by hw offload functions after
2269 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2270 	 * iterating it.
2271 	 */
2272 	ASSERT_RTNL();
2273 
2274 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2275 		cls_flower.rule =
2276 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2277 		if (!cls_flower.rule) {
2278 			__fl_put(f);
2279 			return -ENOMEM;
2280 		}
2281 
2282 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2283 					   extack);
2284 		cls_flower.command = add ?
2285 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2286 		cls_flower.cookie = (unsigned long)f;
2287 		cls_flower.rule->match.dissector = &f->mask->dissector;
2288 		cls_flower.rule->match.mask = &f->mask->key;
2289 		cls_flower.rule->match.key = &f->mkey;
2290 
2291 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2292 		if (err) {
2293 			kfree(cls_flower.rule);
2294 			if (tc_skip_sw(f->flags)) {
2295 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2296 				__fl_put(f);
2297 				return err;
2298 			}
2299 			goto next_flow;
2300 		}
2301 
2302 		cls_flower.classid = f->res.classid;
2303 
2304 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2305 					    TC_SETUP_CLSFLOWER, &cls_flower,
2306 					    cb_priv, &f->flags,
2307 					    &f->in_hw_count);
2308 		tc_cleanup_flow_action(&cls_flower.rule->action);
2309 		kfree(cls_flower.rule);
2310 
2311 		if (err) {
2312 			__fl_put(f);
2313 			return err;
2314 		}
2315 next_flow:
2316 		__fl_put(f);
2317 	}
2318 
2319 	return 0;
2320 }
2321 
fl_hw_add(struct tcf_proto * tp,void * type_data)2322 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2323 {
2324 	struct flow_cls_offload *cls_flower = type_data;
2325 	struct cls_fl_filter *f =
2326 		(struct cls_fl_filter *) cls_flower->cookie;
2327 	struct cls_fl_head *head = fl_head_dereference(tp);
2328 
2329 	spin_lock(&tp->lock);
2330 	list_add(&f->hw_list, &head->hw_filters);
2331 	spin_unlock(&tp->lock);
2332 }
2333 
fl_hw_del(struct tcf_proto * tp,void * type_data)2334 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2335 {
2336 	struct flow_cls_offload *cls_flower = type_data;
2337 	struct cls_fl_filter *f =
2338 		(struct cls_fl_filter *) cls_flower->cookie;
2339 
2340 	spin_lock(&tp->lock);
2341 	if (!list_empty(&f->hw_list))
2342 		list_del_init(&f->hw_list);
2343 	spin_unlock(&tp->lock);
2344 }
2345 
fl_hw_create_tmplt(struct tcf_chain * chain,struct fl_flow_tmplt * tmplt)2346 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2347 			      struct fl_flow_tmplt *tmplt)
2348 {
2349 	struct flow_cls_offload cls_flower = {};
2350 	struct tcf_block *block = chain->block;
2351 
2352 	cls_flower.rule = flow_rule_alloc(0);
2353 	if (!cls_flower.rule)
2354 		return -ENOMEM;
2355 
2356 	cls_flower.common.chain_index = chain->index;
2357 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2358 	cls_flower.cookie = (unsigned long) tmplt;
2359 	cls_flower.rule->match.dissector = &tmplt->dissector;
2360 	cls_flower.rule->match.mask = &tmplt->mask;
2361 	cls_flower.rule->match.key = &tmplt->dummy_key;
2362 
2363 	/* We don't care if driver (any of them) fails to handle this
2364 	 * call. It serves just as a hint for it.
2365 	 */
2366 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2367 	kfree(cls_flower.rule);
2368 
2369 	return 0;
2370 }
2371 
fl_hw_destroy_tmplt(struct tcf_chain * chain,struct fl_flow_tmplt * tmplt)2372 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2373 				struct fl_flow_tmplt *tmplt)
2374 {
2375 	struct flow_cls_offload cls_flower = {};
2376 	struct tcf_block *block = chain->block;
2377 
2378 	cls_flower.common.chain_index = chain->index;
2379 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2380 	cls_flower.cookie = (unsigned long) tmplt;
2381 
2382 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2383 }
2384 
fl_tmplt_create(struct net * net,struct tcf_chain * chain,struct nlattr ** tca,struct netlink_ext_ack * extack)2385 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2386 			     struct nlattr **tca,
2387 			     struct netlink_ext_ack *extack)
2388 {
2389 	struct fl_flow_tmplt *tmplt;
2390 	struct nlattr **tb;
2391 	int err;
2392 
2393 	if (!tca[TCA_OPTIONS])
2394 		return ERR_PTR(-EINVAL);
2395 
2396 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2397 	if (!tb)
2398 		return ERR_PTR(-ENOBUFS);
2399 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2400 					  tca[TCA_OPTIONS], fl_policy, NULL);
2401 	if (err)
2402 		goto errout_tb;
2403 
2404 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2405 	if (!tmplt) {
2406 		err = -ENOMEM;
2407 		goto errout_tb;
2408 	}
2409 	tmplt->chain = chain;
2410 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2411 	if (err)
2412 		goto errout_tmplt;
2413 
2414 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2415 
2416 	err = fl_hw_create_tmplt(chain, tmplt);
2417 	if (err)
2418 		goto errout_tmplt;
2419 
2420 	kfree(tb);
2421 	return tmplt;
2422 
2423 errout_tmplt:
2424 	kfree(tmplt);
2425 errout_tb:
2426 	kfree(tb);
2427 	return ERR_PTR(err);
2428 }
2429 
fl_tmplt_destroy(void * tmplt_priv)2430 static void fl_tmplt_destroy(void *tmplt_priv)
2431 {
2432 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2433 
2434 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2435 	kfree(tmplt);
2436 }
2437 
fl_dump_key_val(struct sk_buff * skb,void * val,int val_type,void * mask,int mask_type,int len)2438 static int fl_dump_key_val(struct sk_buff *skb,
2439 			   void *val, int val_type,
2440 			   void *mask, int mask_type, int len)
2441 {
2442 	int err;
2443 
2444 	if (!memchr_inv(mask, 0, len))
2445 		return 0;
2446 	err = nla_put(skb, val_type, len, val);
2447 	if (err)
2448 		return err;
2449 	if (mask_type != TCA_FLOWER_UNSPEC) {
2450 		err = nla_put(skb, mask_type, len, mask);
2451 		if (err)
2452 			return err;
2453 	}
2454 	return 0;
2455 }
2456 
fl_dump_key_port_range(struct sk_buff * skb,struct fl_flow_key * key,struct fl_flow_key * mask)2457 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2458 				  struct fl_flow_key *mask)
2459 {
2460 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2461 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2462 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2463 			    sizeof(key->tp_range.tp_min.dst)) ||
2464 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2465 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2466 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2467 			    sizeof(key->tp_range.tp_max.dst)) ||
2468 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2469 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2470 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2471 			    sizeof(key->tp_range.tp_min.src)) ||
2472 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2473 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2474 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2475 			    sizeof(key->tp_range.tp_max.src)))
2476 		return -1;
2477 
2478 	return 0;
2479 }
2480 
fl_dump_key_mpls_opt_lse(struct sk_buff * skb,struct flow_dissector_key_mpls * mpls_key,struct flow_dissector_key_mpls * mpls_mask,u8 lse_index)2481 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2482 				    struct flow_dissector_key_mpls *mpls_key,
2483 				    struct flow_dissector_key_mpls *mpls_mask,
2484 				    u8 lse_index)
2485 {
2486 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2487 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2488 	int err;
2489 
2490 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2491 			 lse_index + 1);
2492 	if (err)
2493 		return err;
2494 
2495 	if (lse_mask->mpls_ttl) {
2496 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2497 				 lse_key->mpls_ttl);
2498 		if (err)
2499 			return err;
2500 	}
2501 	if (lse_mask->mpls_bos) {
2502 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2503 				 lse_key->mpls_bos);
2504 		if (err)
2505 			return err;
2506 	}
2507 	if (lse_mask->mpls_tc) {
2508 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2509 				 lse_key->mpls_tc);
2510 		if (err)
2511 			return err;
2512 	}
2513 	if (lse_mask->mpls_label) {
2514 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2515 				  lse_key->mpls_label);
2516 		if (err)
2517 			return err;
2518 	}
2519 
2520 	return 0;
2521 }
2522 
fl_dump_key_mpls_opts(struct sk_buff * skb,struct flow_dissector_key_mpls * mpls_key,struct flow_dissector_key_mpls * mpls_mask)2523 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2524 				 struct flow_dissector_key_mpls *mpls_key,
2525 				 struct flow_dissector_key_mpls *mpls_mask)
2526 {
2527 	struct nlattr *opts;
2528 	struct nlattr *lse;
2529 	u8 lse_index;
2530 	int err;
2531 
2532 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2533 	if (!opts)
2534 		return -EMSGSIZE;
2535 
2536 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2537 		if (!(mpls_mask->used_lses & 1 << lse_index))
2538 			continue;
2539 
2540 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2541 		if (!lse) {
2542 			err = -EMSGSIZE;
2543 			goto err_opts;
2544 		}
2545 
2546 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2547 					       lse_index);
2548 		if (err)
2549 			goto err_opts_lse;
2550 		nla_nest_end(skb, lse);
2551 	}
2552 	nla_nest_end(skb, opts);
2553 
2554 	return 0;
2555 
2556 err_opts_lse:
2557 	nla_nest_cancel(skb, lse);
2558 err_opts:
2559 	nla_nest_cancel(skb, opts);
2560 
2561 	return err;
2562 }
2563 
fl_dump_key_mpls(struct sk_buff * skb,struct flow_dissector_key_mpls * mpls_key,struct flow_dissector_key_mpls * mpls_mask)2564 static int fl_dump_key_mpls(struct sk_buff *skb,
2565 			    struct flow_dissector_key_mpls *mpls_key,
2566 			    struct flow_dissector_key_mpls *mpls_mask)
2567 {
2568 	struct flow_dissector_mpls_lse *lse_mask;
2569 	struct flow_dissector_mpls_lse *lse_key;
2570 	int err;
2571 
2572 	if (!mpls_mask->used_lses)
2573 		return 0;
2574 
2575 	lse_mask = &mpls_mask->ls[0];
2576 	lse_key = &mpls_key->ls[0];
2577 
2578 	/* For backward compatibility, don't use the MPLS nested attributes if
2579 	 * the rule can be expressed using the old attributes.
2580 	 */
2581 	if (mpls_mask->used_lses & ~1 ||
2582 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2583 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2584 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2585 
2586 	if (lse_mask->mpls_ttl) {
2587 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2588 				 lse_key->mpls_ttl);
2589 		if (err)
2590 			return err;
2591 	}
2592 	if (lse_mask->mpls_tc) {
2593 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2594 				 lse_key->mpls_tc);
2595 		if (err)
2596 			return err;
2597 	}
2598 	if (lse_mask->mpls_label) {
2599 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2600 				  lse_key->mpls_label);
2601 		if (err)
2602 			return err;
2603 	}
2604 	if (lse_mask->mpls_bos) {
2605 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2606 				 lse_key->mpls_bos);
2607 		if (err)
2608 			return err;
2609 	}
2610 	return 0;
2611 }
2612 
fl_dump_key_ip(struct sk_buff * skb,bool encap,struct flow_dissector_key_ip * key,struct flow_dissector_key_ip * mask)2613 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2614 			  struct flow_dissector_key_ip *key,
2615 			  struct flow_dissector_key_ip *mask)
2616 {
2617 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2618 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2619 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2620 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2621 
2622 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2623 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2624 		return -1;
2625 
2626 	return 0;
2627 }
2628 
fl_dump_key_vlan(struct sk_buff * skb,int vlan_id_key,int vlan_prio_key,struct flow_dissector_key_vlan * vlan_key,struct flow_dissector_key_vlan * vlan_mask)2629 static int fl_dump_key_vlan(struct sk_buff *skb,
2630 			    int vlan_id_key, int vlan_prio_key,
2631 			    struct flow_dissector_key_vlan *vlan_key,
2632 			    struct flow_dissector_key_vlan *vlan_mask)
2633 {
2634 	int err;
2635 
2636 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2637 		return 0;
2638 	if (vlan_mask->vlan_id) {
2639 		err = nla_put_u16(skb, vlan_id_key,
2640 				  vlan_key->vlan_id);
2641 		if (err)
2642 			return err;
2643 	}
2644 	if (vlan_mask->vlan_priority) {
2645 		err = nla_put_u8(skb, vlan_prio_key,
2646 				 vlan_key->vlan_priority);
2647 		if (err)
2648 			return err;
2649 	}
2650 	return 0;
2651 }
2652 
fl_get_key_flag(u32 dissector_key,u32 dissector_mask,u32 * flower_key,u32 * flower_mask,u32 flower_flag_bit,u32 dissector_flag_bit)2653 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2654 			    u32 *flower_key, u32 *flower_mask,
2655 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2656 {
2657 	if (dissector_mask & dissector_flag_bit) {
2658 		*flower_mask |= flower_flag_bit;
2659 		if (dissector_key & dissector_flag_bit)
2660 			*flower_key |= flower_flag_bit;
2661 	}
2662 }
2663 
fl_dump_key_flags(struct sk_buff * skb,u32 flags_key,u32 flags_mask)2664 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2665 {
2666 	u32 key, mask;
2667 	__be32 _key, _mask;
2668 	int err;
2669 
2670 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2671 		return 0;
2672 
2673 	key = 0;
2674 	mask = 0;
2675 
2676 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2677 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2678 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2679 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2680 			FLOW_DIS_FIRST_FRAG);
2681 
2682 	_key = cpu_to_be32(key);
2683 	_mask = cpu_to_be32(mask);
2684 
2685 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2686 	if (err)
2687 		return err;
2688 
2689 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2690 }
2691 
fl_dump_key_geneve_opt(struct sk_buff * skb,struct flow_dissector_key_enc_opts * enc_opts)2692 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2693 				  struct flow_dissector_key_enc_opts *enc_opts)
2694 {
2695 	struct geneve_opt *opt;
2696 	struct nlattr *nest;
2697 	int opt_off = 0;
2698 
2699 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2700 	if (!nest)
2701 		goto nla_put_failure;
2702 
2703 	while (enc_opts->len > opt_off) {
2704 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2705 
2706 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2707 				 opt->opt_class))
2708 			goto nla_put_failure;
2709 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2710 			       opt->type))
2711 			goto nla_put_failure;
2712 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2713 			    opt->length * 4, opt->opt_data))
2714 			goto nla_put_failure;
2715 
2716 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2717 	}
2718 	nla_nest_end(skb, nest);
2719 	return 0;
2720 
2721 nla_put_failure:
2722 	nla_nest_cancel(skb, nest);
2723 	return -EMSGSIZE;
2724 }
2725 
fl_dump_key_vxlan_opt(struct sk_buff * skb,struct flow_dissector_key_enc_opts * enc_opts)2726 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2727 				 struct flow_dissector_key_enc_opts *enc_opts)
2728 {
2729 	struct vxlan_metadata *md;
2730 	struct nlattr *nest;
2731 
2732 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2733 	if (!nest)
2734 		goto nla_put_failure;
2735 
2736 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2737 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2738 		goto nla_put_failure;
2739 
2740 	nla_nest_end(skb, nest);
2741 	return 0;
2742 
2743 nla_put_failure:
2744 	nla_nest_cancel(skb, nest);
2745 	return -EMSGSIZE;
2746 }
2747 
fl_dump_key_erspan_opt(struct sk_buff * skb,struct flow_dissector_key_enc_opts * enc_opts)2748 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2749 				  struct flow_dissector_key_enc_opts *enc_opts)
2750 {
2751 	struct erspan_metadata *md;
2752 	struct nlattr *nest;
2753 
2754 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2755 	if (!nest)
2756 		goto nla_put_failure;
2757 
2758 	md = (struct erspan_metadata *)&enc_opts->data[0];
2759 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2760 		goto nla_put_failure;
2761 
2762 	if (md->version == 1 &&
2763 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2764 		goto nla_put_failure;
2765 
2766 	if (md->version == 2 &&
2767 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2768 			md->u.md2.dir) ||
2769 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2770 			get_hwid(&md->u.md2))))
2771 		goto nla_put_failure;
2772 
2773 	nla_nest_end(skb, nest);
2774 	return 0;
2775 
2776 nla_put_failure:
2777 	nla_nest_cancel(skb, nest);
2778 	return -EMSGSIZE;
2779 }
2780 
fl_dump_key_ct(struct sk_buff * skb,struct flow_dissector_key_ct * key,struct flow_dissector_key_ct * mask)2781 static int fl_dump_key_ct(struct sk_buff *skb,
2782 			  struct flow_dissector_key_ct *key,
2783 			  struct flow_dissector_key_ct *mask)
2784 {
2785 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2786 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2787 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2788 			    sizeof(key->ct_state)))
2789 		goto nla_put_failure;
2790 
2791 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2792 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2793 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2794 			    sizeof(key->ct_zone)))
2795 		goto nla_put_failure;
2796 
2797 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2798 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2799 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2800 			    sizeof(key->ct_mark)))
2801 		goto nla_put_failure;
2802 
2803 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2804 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2805 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2806 			    sizeof(key->ct_labels)))
2807 		goto nla_put_failure;
2808 
2809 	return 0;
2810 
2811 nla_put_failure:
2812 	return -EMSGSIZE;
2813 }
2814 
fl_dump_key_options(struct sk_buff * skb,int enc_opt_type,struct flow_dissector_key_enc_opts * enc_opts)2815 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2816 			       struct flow_dissector_key_enc_opts *enc_opts)
2817 {
2818 	struct nlattr *nest;
2819 	int err;
2820 
2821 	if (!enc_opts->len)
2822 		return 0;
2823 
2824 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2825 	if (!nest)
2826 		goto nla_put_failure;
2827 
2828 	switch (enc_opts->dst_opt_type) {
2829 	case TUNNEL_GENEVE_OPT:
2830 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2831 		if (err)
2832 			goto nla_put_failure;
2833 		break;
2834 	case TUNNEL_VXLAN_OPT:
2835 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2836 		if (err)
2837 			goto nla_put_failure;
2838 		break;
2839 	case TUNNEL_ERSPAN_OPT:
2840 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2841 		if (err)
2842 			goto nla_put_failure;
2843 		break;
2844 	default:
2845 		goto nla_put_failure;
2846 	}
2847 	nla_nest_end(skb, nest);
2848 	return 0;
2849 
2850 nla_put_failure:
2851 	nla_nest_cancel(skb, nest);
2852 	return -EMSGSIZE;
2853 }
2854 
fl_dump_key_enc_opt(struct sk_buff * skb,struct flow_dissector_key_enc_opts * key_opts,struct flow_dissector_key_enc_opts * msk_opts)2855 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2856 			       struct flow_dissector_key_enc_opts *key_opts,
2857 			       struct flow_dissector_key_enc_opts *msk_opts)
2858 {
2859 	int err;
2860 
2861 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2862 	if (err)
2863 		return err;
2864 
2865 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2866 }
2867 
fl_dump_key(struct sk_buff * skb,struct net * net,struct fl_flow_key * key,struct fl_flow_key * mask)2868 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2869 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2870 {
2871 	if (mask->meta.ingress_ifindex) {
2872 		struct net_device *dev;
2873 
2874 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2875 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2876 			goto nla_put_failure;
2877 	}
2878 
2879 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2880 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2881 			    sizeof(key->eth.dst)) ||
2882 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2883 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2884 			    sizeof(key->eth.src)) ||
2885 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2886 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2887 			    sizeof(key->basic.n_proto)))
2888 		goto nla_put_failure;
2889 
2890 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2891 		goto nla_put_failure;
2892 
2893 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2894 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2895 		goto nla_put_failure;
2896 
2897 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2898 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2899 			     &key->cvlan, &mask->cvlan) ||
2900 	    (mask->cvlan.vlan_tpid &&
2901 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2902 			  key->cvlan.vlan_tpid)))
2903 		goto nla_put_failure;
2904 
2905 	if (mask->basic.n_proto) {
2906 		if (mask->cvlan.vlan_eth_type) {
2907 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2908 					 key->basic.n_proto))
2909 				goto nla_put_failure;
2910 		} else if (mask->vlan.vlan_eth_type) {
2911 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2912 					 key->vlan.vlan_eth_type))
2913 				goto nla_put_failure;
2914 		}
2915 	}
2916 
2917 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2918 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2919 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2920 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2921 			    sizeof(key->basic.ip_proto)) ||
2922 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2923 		goto nla_put_failure;
2924 
2925 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2926 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2927 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2928 			     sizeof(key->ipv4.src)) ||
2929 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2930 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2931 			     sizeof(key->ipv4.dst))))
2932 		goto nla_put_failure;
2933 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2934 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2935 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2936 				  sizeof(key->ipv6.src)) ||
2937 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2938 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2939 				  sizeof(key->ipv6.dst))))
2940 		goto nla_put_failure;
2941 
2942 	if (key->basic.ip_proto == IPPROTO_TCP &&
2943 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2944 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2945 			     sizeof(key->tp.src)) ||
2946 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2947 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2948 			     sizeof(key->tp.dst)) ||
2949 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2950 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2951 			     sizeof(key->tcp.flags))))
2952 		goto nla_put_failure;
2953 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2954 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2955 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2956 				  sizeof(key->tp.src)) ||
2957 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2958 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2959 				  sizeof(key->tp.dst))))
2960 		goto nla_put_failure;
2961 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2962 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2963 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2964 				  sizeof(key->tp.src)) ||
2965 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2966 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2967 				  sizeof(key->tp.dst))))
2968 		goto nla_put_failure;
2969 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2970 		 key->basic.ip_proto == IPPROTO_ICMP &&
2971 		 (fl_dump_key_val(skb, &key->icmp.type,
2972 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2973 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2974 				  sizeof(key->icmp.type)) ||
2975 		  fl_dump_key_val(skb, &key->icmp.code,
2976 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2977 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2978 				  sizeof(key->icmp.code))))
2979 		goto nla_put_failure;
2980 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2981 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2982 		 (fl_dump_key_val(skb, &key->icmp.type,
2983 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2984 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2985 				  sizeof(key->icmp.type)) ||
2986 		  fl_dump_key_val(skb, &key->icmp.code,
2987 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2988 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2989 				  sizeof(key->icmp.code))))
2990 		goto nla_put_failure;
2991 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2992 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2993 		 (fl_dump_key_val(skb, &key->arp.sip,
2994 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2995 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2996 				  sizeof(key->arp.sip)) ||
2997 		  fl_dump_key_val(skb, &key->arp.tip,
2998 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2999 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3000 				  sizeof(key->arp.tip)) ||
3001 		  fl_dump_key_val(skb, &key->arp.op,
3002 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3003 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3004 				  sizeof(key->arp.op)) ||
3005 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3006 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3007 				  sizeof(key->arp.sha)) ||
3008 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3009 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3010 				  sizeof(key->arp.tha))))
3011 		goto nla_put_failure;
3012 
3013 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3014 	     key->basic.ip_proto == IPPROTO_UDP ||
3015 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3016 	     fl_dump_key_port_range(skb, key, mask))
3017 		goto nla_put_failure;
3018 
3019 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3020 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3021 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3022 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3023 			    sizeof(key->enc_ipv4.src)) ||
3024 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3025 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3026 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3027 			     sizeof(key->enc_ipv4.dst))))
3028 		goto nla_put_failure;
3029 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3030 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3031 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3032 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3033 			    sizeof(key->enc_ipv6.src)) ||
3034 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3035 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3036 				 &mask->enc_ipv6.dst,
3037 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3038 			    sizeof(key->enc_ipv6.dst))))
3039 		goto nla_put_failure;
3040 
3041 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3042 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3043 			    sizeof(key->enc_key_id)) ||
3044 	    fl_dump_key_val(skb, &key->enc_tp.src,
3045 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3046 			    &mask->enc_tp.src,
3047 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3048 			    sizeof(key->enc_tp.src)) ||
3049 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3050 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3051 			    &mask->enc_tp.dst,
3052 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3053 			    sizeof(key->enc_tp.dst)) ||
3054 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3055 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3056 		goto nla_put_failure;
3057 
3058 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3059 		goto nla_put_failure;
3060 
3061 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3062 		goto nla_put_failure;
3063 
3064 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3065 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3066 			     sizeof(key->hash.hash)))
3067 		goto nla_put_failure;
3068 
3069 	return 0;
3070 
3071 nla_put_failure:
3072 	return -EMSGSIZE;
3073 }
3074 
fl_dump(struct net * net,struct tcf_proto * tp,void * fh,struct sk_buff * skb,struct tcmsg * t,bool rtnl_held)3075 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3076 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3077 {
3078 	struct cls_fl_filter *f = fh;
3079 	struct nlattr *nest;
3080 	struct fl_flow_key *key, *mask;
3081 	bool skip_hw;
3082 
3083 	if (!f)
3084 		return skb->len;
3085 
3086 	t->tcm_handle = f->handle;
3087 
3088 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3089 	if (!nest)
3090 		goto nla_put_failure;
3091 
3092 	spin_lock(&tp->lock);
3093 
3094 	if (f->res.classid &&
3095 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3096 		goto nla_put_failure_locked;
3097 
3098 	key = &f->key;
3099 	mask = &f->mask->key;
3100 	skip_hw = tc_skip_hw(f->flags);
3101 
3102 	if (fl_dump_key(skb, net, key, mask))
3103 		goto nla_put_failure_locked;
3104 
3105 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3106 		goto nla_put_failure_locked;
3107 
3108 	spin_unlock(&tp->lock);
3109 
3110 	if (!skip_hw)
3111 		fl_hw_update_stats(tp, f, rtnl_held);
3112 
3113 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3114 		goto nla_put_failure;
3115 
3116 	if (tcf_exts_dump(skb, &f->exts))
3117 		goto nla_put_failure;
3118 
3119 	nla_nest_end(skb, nest);
3120 
3121 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3122 		goto nla_put_failure;
3123 
3124 	return skb->len;
3125 
3126 nla_put_failure_locked:
3127 	spin_unlock(&tp->lock);
3128 nla_put_failure:
3129 	nla_nest_cancel(skb, nest);
3130 	return -1;
3131 }
3132 
fl_terse_dump(struct net * net,struct tcf_proto * tp,void * fh,struct sk_buff * skb,struct tcmsg * t,bool rtnl_held)3133 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3134 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3135 {
3136 	struct cls_fl_filter *f = fh;
3137 	struct nlattr *nest;
3138 	bool skip_hw;
3139 
3140 	if (!f)
3141 		return skb->len;
3142 
3143 	t->tcm_handle = f->handle;
3144 
3145 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3146 	if (!nest)
3147 		goto nla_put_failure;
3148 
3149 	spin_lock(&tp->lock);
3150 
3151 	skip_hw = tc_skip_hw(f->flags);
3152 
3153 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3154 		goto nla_put_failure_locked;
3155 
3156 	spin_unlock(&tp->lock);
3157 
3158 	if (!skip_hw)
3159 		fl_hw_update_stats(tp, f, rtnl_held);
3160 
3161 	if (tcf_exts_terse_dump(skb, &f->exts))
3162 		goto nla_put_failure;
3163 
3164 	nla_nest_end(skb, nest);
3165 
3166 	return skb->len;
3167 
3168 nla_put_failure_locked:
3169 	spin_unlock(&tp->lock);
3170 nla_put_failure:
3171 	nla_nest_cancel(skb, nest);
3172 	return -1;
3173 }
3174 
fl_tmplt_dump(struct sk_buff * skb,struct net * net,void * tmplt_priv)3175 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3176 {
3177 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3178 	struct fl_flow_key *key, *mask;
3179 	struct nlattr *nest;
3180 
3181 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3182 	if (!nest)
3183 		goto nla_put_failure;
3184 
3185 	key = &tmplt->dummy_key;
3186 	mask = &tmplt->mask;
3187 
3188 	if (fl_dump_key(skb, net, key, mask))
3189 		goto nla_put_failure;
3190 
3191 	nla_nest_end(skb, nest);
3192 
3193 	return skb->len;
3194 
3195 nla_put_failure:
3196 	nla_nest_cancel(skb, nest);
3197 	return -EMSGSIZE;
3198 }
3199 
fl_bind_class(void * fh,u32 classid,unsigned long cl,void * q,unsigned long base)3200 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3201 			  unsigned long base)
3202 {
3203 	struct cls_fl_filter *f = fh;
3204 
3205 	if (f && f->res.classid == classid) {
3206 		if (cl)
3207 			__tcf_bind_filter(q, &f->res, base);
3208 		else
3209 			__tcf_unbind_filter(q, &f->res);
3210 	}
3211 }
3212 
fl_delete_empty(struct tcf_proto * tp)3213 static bool fl_delete_empty(struct tcf_proto *tp)
3214 {
3215 	struct cls_fl_head *head = fl_head_dereference(tp);
3216 
3217 	spin_lock(&tp->lock);
3218 	tp->deleting = idr_is_empty(&head->handle_idr);
3219 	spin_unlock(&tp->lock);
3220 
3221 	return tp->deleting;
3222 }
3223 
3224 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3225 	.kind		= "flower",
3226 	.classify	= fl_classify,
3227 	.init		= fl_init,
3228 	.destroy	= fl_destroy,
3229 	.get		= fl_get,
3230 	.put		= fl_put,
3231 	.change		= fl_change,
3232 	.delete		= fl_delete,
3233 	.delete_empty	= fl_delete_empty,
3234 	.walk		= fl_walk,
3235 	.reoffload	= fl_reoffload,
3236 	.hw_add		= fl_hw_add,
3237 	.hw_del		= fl_hw_del,
3238 	.dump		= fl_dump,
3239 	.terse_dump	= fl_terse_dump,
3240 	.bind_class	= fl_bind_class,
3241 	.tmplt_create	= fl_tmplt_create,
3242 	.tmplt_destroy	= fl_tmplt_destroy,
3243 	.tmplt_dump	= fl_tmplt_dump,
3244 	.owner		= THIS_MODULE,
3245 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3246 };
3247 
cls_fl_init(void)3248 static int __init cls_fl_init(void)
3249 {
3250 	return register_tcf_proto_ops(&cls_fl_ops);
3251 }
3252 
cls_fl_exit(void)3253 static void __exit cls_fl_exit(void)
3254 {
3255 	unregister_tcf_proto_ops(&cls_fl_ops);
3256 }
3257 
3258 module_init(cls_fl_init);
3259 module_exit(cls_fl_exit);
3260 
3261 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3262 MODULE_DESCRIPTION("Flower classifier");
3263 MODULE_LICENSE("GPL v2");
3264