1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
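
/* Example configuration from userspace (illustrative only; assumes the
 * iproute2 'tc' tool):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 */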
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 
26 #include <net/dst.h>
27 #include <net/dst_metadata.h>
28 
29 #include <uapi/linux/netfilter/nf_conntrack_common.h>
30 
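/* Unified key layout shared by filter keys, masked keys and masks.  It is
 * long-aligned (see below) so that masked comparisons can be done one long
 * at a time over the byte range covered by the mask.
 */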
31 struct fl_flow_key {
32 	struct flow_dissector_key_meta meta;
33 	struct flow_dissector_key_control control;
34 	struct flow_dissector_key_control enc_control;
35 	struct flow_dissector_key_basic basic;
36 	struct flow_dissector_key_eth_addrs eth;
37 	struct flow_dissector_key_vlan vlan;
38 	struct flow_dissector_key_vlan cvlan;
39 	union {
40 		struct flow_dissector_key_ipv4_addrs ipv4;
41 		struct flow_dissector_key_ipv6_addrs ipv6;
42 	};
43 	struct flow_dissector_key_ports tp;
44 	struct flow_dissector_key_icmp icmp;
45 	struct flow_dissector_key_arp arp;
46 	struct flow_dissector_key_keyid enc_key_id;
47 	union {
48 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
49 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
50 	};
51 	struct flow_dissector_key_ports enc_tp;
52 	struct flow_dissector_key_mpls mpls;
53 	struct flow_dissector_key_tcp tcp;
54 	struct flow_dissector_key_ip ip;
55 	struct flow_dissector_key_ip enc_ip;
56 	struct flow_dissector_key_enc_opts enc_opts;
57 	union {
58 		struct flow_dissector_key_ports tp;
59 		struct {
60 			struct flow_dissector_key_ports tp_min;
61 			struct flow_dissector_key_ports tp_max;
62 		};
63 	} tp_range;
64 	struct flow_dissector_key_ct ct;
65 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
66 
67 struct fl_flow_mask_range {
68 	unsigned short int start;
69 	unsigned short int end;
70 };
71 
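/* A mask is shared by all filters that use an identical fl_flow_key mask.
 * Each mask carries its own rhashtable of filters, keyed only on the byte
 * range [range.start, range.end) that the mask actually covers, and is
 * reference counted by the filters using it.
 */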
72 struct fl_flow_mask {
73 	struct fl_flow_key key;
74 	struct fl_flow_mask_range range;
75 	u32 flags;
76 	struct rhash_head ht_node;
77 	struct rhashtable ht;
78 	struct rhashtable_params filter_ht_params;
79 	struct flow_dissector dissector;
80 	struct list_head filters;
81 	struct rcu_work rwork;
82 	struct list_head list;
83 	refcount_t refcnt;
84 };
85 
86 struct fl_flow_tmplt {
87 	struct fl_flow_key dummy_key;
88 	struct fl_flow_key mask;
89 	struct flow_dissector dissector;
90 	struct tcf_chain *chain;
91 };
92 
93 struct cls_fl_head {
94 	struct rhashtable ht;
95 	spinlock_t masks_lock; /* Protect masks list */
96 	struct list_head masks;
97 	struct list_head hw_filters;
98 	struct rcu_work rwork;
99 	struct idr handle_idr;
100 };
101 
102 struct cls_fl_filter {
103 	struct fl_flow_mask *mask;
104 	struct rhash_head ht_node;
105 	struct fl_flow_key mkey;
106 	struct tcf_exts exts;
107 	struct tcf_result res;
108 	struct fl_flow_key key;
109 	struct list_head list;
110 	struct list_head hw_list;
111 	u32 handle;
112 	u32 flags;
113 	u32 in_hw_count;
114 	struct rcu_work rwork;
115 	struct net_device *hw_dev;
116 	/* Flower classifier is unlocked, which means that its reference counter
117 	 * can be changed concurrently without any kind of external
118 	 * synchronization. Use atomic reference counter to be concurrency-safe.
119 	 */
120 	refcount_t refcnt;
121 	bool deleted;
122 };
123 
124 static const struct rhashtable_params mask_ht_params = {
125 	.key_offset = offsetof(struct fl_flow_mask, key),
126 	.key_len = sizeof(struct fl_flow_key),
127 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
128 	.automatic_shrinking = true,
129 };
130 
131 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
132 {
133 	return mask->range.end - mask->range.start;
134 }
135 
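/* Find the first and last non-zero bytes of the mask and round the
 * resulting range out to long boundaries; lookups only hash and compare
 * this sub-range of the key.
 */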
136 static void fl_mask_update_range(struct fl_flow_mask *mask)
137 {
138 	const u8 *bytes = (const u8 *) &mask->key;
139 	size_t size = sizeof(mask->key);
140 	size_t i, first = 0, last;
141 
142 	for (i = 0; i < size; i++) {
143 		if (bytes[i]) {
144 			first = i;
145 			break;
146 		}
147 	}
148 	last = first;
149 	for (i = size - 1; i != first; i--) {
150 		if (bytes[i]) {
151 			last = i;
152 			break;
153 		}
154 	}
155 	mask->range.start = rounddown(first, sizeof(long));
156 	mask->range.end = roundup(last + 1, sizeof(long));
157 }
158 
159 static void *fl_key_get_start(struct fl_flow_key *key,
160 			      const struct fl_flow_mask *mask)
161 {
162 	return (u8 *) key + mask->range.start;
163 }
164 
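/* mkey = key & mask, computed a long at a time over the masked range. */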
165 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
166 			      struct fl_flow_mask *mask)
167 {
168 	const long *lkey = fl_key_get_start(key, mask);
169 	const long *lmask = fl_key_get_start(&mask->key, mask);
170 	long *lmkey = fl_key_get_start(mkey, mask);
171 	int i;
172 
173 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
174 		*lmkey++ = *lkey++ & *lmask++;
175 }
176 
177 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
178 			       struct fl_flow_mask *mask)
179 {
180 	const long *lmask = fl_key_get_start(&mask->key, mask);
181 	const long *ltmplt;
182 	int i;
183 
184 	if (!tmplt)
185 		return true;
186 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
187 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
188 		if (~*ltmplt++ & *lmask++)
189 			return false;
190 	}
191 	return true;
192 }
193 
194 static void fl_clear_masked_range(struct fl_flow_key *key,
195 				  struct fl_flow_mask *mask)
196 {
197 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
198 }
199 
200 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
201 				  struct fl_flow_key *key,
202 				  struct fl_flow_key *mkey)
203 {
204 	__be16 min_mask, max_mask, min_val, max_val;
205 
206 	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
207 	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
208 	min_val = htons(filter->key.tp_range.tp_min.dst);
209 	max_val = htons(filter->key.tp_range.tp_max.dst);
210 
211 	if (min_mask && max_mask) {
212 		if (htons(key->tp_range.tp.dst) < min_val ||
213 		    htons(key->tp_range.tp.dst) > max_val)
214 			return false;
215 
216 		/* skb does not have min and max values */
217 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
218 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
219 	}
220 	return true;
221 }
222 
223 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
224 				  struct fl_flow_key *key,
225 				  struct fl_flow_key *mkey)
226 {
227 	__be16 min_mask, max_mask, min_val, max_val;
228 
229 	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
230 	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
231 	min_val = htons(filter->key.tp_range.tp_min.src);
232 	max_val = htons(filter->key.tp_range.tp_max.src);
233 
234 	if (min_mask && max_mask) {
235 		if (htons(key->tp_range.tp.src) < min_val ||
236 		    htons(key->tp_range.tp.src) > max_val)
237 			return false;
238 
239 		/* skb does not have min and max values */
240 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
241 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
242 	}
243 	return true;
244 }
245 
246 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
247 					 struct fl_flow_key *mkey)
248 {
249 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
250 				      mask->filter_ht_params);
251 }
252 
253 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
254 					     struct fl_flow_key *mkey,
255 					     struct fl_flow_key *key)
256 {
257 	struct cls_fl_filter *filter, *f;
258 
259 	list_for_each_entry_rcu(filter, &mask->filters, list) {
260 		if (!fl_range_port_dst_cmp(filter, key, mkey))
261 			continue;
262 
263 		if (!fl_range_port_src_cmp(filter, key, mkey))
264 			continue;
265 
266 		f = __fl_lookup(mask, mkey);
267 		if (f)
268 			return f;
269 	}
270 	return NULL;
271 }
272 
273 static noinline_for_stack
274 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
275 {
276 	struct fl_flow_key mkey;
277 
278 	fl_set_masked_key(&mkey, key, mask);
279 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
280 		return fl_lookup_range(mask, &mkey, key);
281 
282 	return __fl_lookup(mask, &mkey);
283 }
284 
285 static u16 fl_ct_info_to_flower_map[] = {
286 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
287 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
288 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
289 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
290 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
291 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
292 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
293 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
294 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
296 };
297 
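/* Main classification path: for each mask, dissect only the keys that the
 * mask needs, apply the mask and look the result up in that mask's filter
 * hash table.  The first match not marked skip_sw has its actions executed.
 */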
298 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
299 		       struct tcf_result *res)
300 {
301 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
302 	struct fl_flow_key skb_key;
303 	struct fl_flow_mask *mask;
304 	struct cls_fl_filter *f;
305 
306 	list_for_each_entry_rcu(mask, &head->masks, list) {
307 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
308 		fl_clear_masked_range(&skb_key, mask);
309 
310 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
311 		/* skb_flow_dissect() does not set n_proto in case of an
312 		 * unknown protocol, so set it here instead.
313 		 */
314 		skb_key.basic.n_proto = skb_protocol(skb, false);
315 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
316 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
317 				    fl_ct_info_to_flower_map,
318 				    ARRAY_SIZE(fl_ct_info_to_flower_map));
319 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
320 
321 		f = fl_mask_lookup(mask, &skb_key);
322 		if (f && !tc_skip_sw(f->flags)) {
323 			*res = f->res;
324 			return tcf_exts_exec(skb, &f->exts, res);
325 		}
326 	}
327 	return -1;
328 }
329 
330 static int fl_init(struct tcf_proto *tp)
331 {
332 	struct cls_fl_head *head;
333 
334 	head = kzalloc(sizeof(*head), GFP_KERNEL);
335 	if (!head)
336 		return -ENOBUFS;
337 
338 	spin_lock_init(&head->masks_lock);
339 	INIT_LIST_HEAD_RCU(&head->masks);
340 	INIT_LIST_HEAD(&head->hw_filters);
341 	rcu_assign_pointer(tp->root, head);
342 	idr_init(&head->handle_idr);
343 
344 	return rhashtable_init(&head->ht, &mask_ht_params);
345 }
346 
347 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
348 {
349 	/* temporary masks don't have their filters list and ht initialized */
350 	if (mask_init_done) {
351 		WARN_ON(!list_empty(&mask->filters));
352 		rhashtable_destroy(&mask->ht);
353 	}
354 	kfree(mask);
355 }
356 
357 static void fl_mask_free_work(struct work_struct *work)
358 {
359 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
360 						 struct fl_flow_mask, rwork);
361 
362 	fl_mask_free(mask, true);
363 }
364 
365 static void fl_uninit_mask_free_work(struct work_struct *work)
366 {
367 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
368 						 struct fl_flow_mask, rwork);
369 
370 	fl_mask_free(mask, false);
371 }
372 
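/* Drop one reference on a mask.  On the last reference the mask is removed
 * from the mask hash table and masks list and freed via a workqueue after
 * an RCU grace period.  Returns true if this was the last reference.
 */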
373 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
374 {
375 	if (!refcount_dec_and_test(&mask->refcnt))
376 		return false;
377 
378 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
379 
380 	spin_lock(&head->masks_lock);
381 	list_del_rcu(&mask->list);
382 	spin_unlock(&head->masks_lock);
383 
384 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
385 
386 	return true;
387 }
388 
389 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
390 {
391 	/* Flower classifier only changes root pointer during init and destroy.
392 	 * Users must obtain reference to tcf_proto instance before calling its
393 	 * API, so tp->root pointer is protected from concurrent call to
394 	 * fl_destroy() by reference counting.
395 	 */
396 	return rcu_dereference_raw(tp->root);
397 }
398 
399 static void __fl_destroy_filter(struct cls_fl_filter *f)
400 {
401 	tcf_exts_destroy(&f->exts);
402 	tcf_exts_put_net(&f->exts);
403 	kfree(f);
404 }
405 
406 static void fl_destroy_filter_work(struct work_struct *work)
407 {
408 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
409 					struct cls_fl_filter, rwork);
410 
411 	__fl_destroy_filter(f);
412 }
413 
414 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
415 				 bool rtnl_held, struct netlink_ext_ack *extack)
416 {
417 	struct tcf_block *block = tp->chain->block;
418 	struct flow_cls_offload cls_flower = {};
419 
420 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
421 	cls_flower.command = FLOW_CLS_DESTROY;
422 	cls_flower.cookie = (unsigned long) f;
423 
424 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
425 			    &f->flags, &f->in_hw_count, rtnl_held);
426 
427 }
428 
429 static int fl_hw_replace_filter(struct tcf_proto *tp,
430 				struct cls_fl_filter *f, bool rtnl_held,
431 				struct netlink_ext_ack *extack)
432 {
433 	struct tcf_block *block = tp->chain->block;
434 	struct flow_cls_offload cls_flower = {};
435 	bool skip_sw = tc_skip_sw(f->flags);
436 	int err = 0;
437 
438 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
439 	if (!cls_flower.rule)
440 		return -ENOMEM;
441 
442 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
443 	cls_flower.command = FLOW_CLS_REPLACE;
444 	cls_flower.cookie = (unsigned long) f;
445 	cls_flower.rule->match.dissector = &f->mask->dissector;
446 	cls_flower.rule->match.mask = &f->mask->key;
447 	cls_flower.rule->match.key = &f->mkey;
448 	cls_flower.classid = f->res.classid;
449 
450 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
451 				   rtnl_held);
452 	if (err) {
453 		kfree(cls_flower.rule);
454 		if (skip_sw) {
455 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
456 			return err;
457 		}
458 		return 0;
459 	}
460 
461 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
462 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
463 	tc_cleanup_flow_action(&cls_flower.rule->action);
464 	kfree(cls_flower.rule);
465 
466 	if (err) {
467 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
468 		return err;
469 	}
470 
471 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
472 		return -EINVAL;
473 
474 	return 0;
475 }
476 
477 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
478 			       bool rtnl_held)
479 {
480 	struct tcf_block *block = tp->chain->block;
481 	struct flow_cls_offload cls_flower = {};
482 
483 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
484 	cls_flower.command = FLOW_CLS_STATS;
485 	cls_flower.cookie = (unsigned long) f;
486 	cls_flower.classid = f->res.classid;
487 
488 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
489 			 rtnl_held);
490 
491 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
492 			      cls_flower.stats.pkts,
493 			      cls_flower.stats.lastused);
494 }
495 
496 static void __fl_put(struct cls_fl_filter *f)
497 {
498 	if (!refcount_dec_and_test(&f->refcnt))
499 		return;
500 
501 	if (tcf_exts_get_net(&f->exts))
502 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
503 	else
504 		__fl_destroy_filter(f);
505 }
506 
507 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
508 {
509 	struct cls_fl_filter *f;
510 
511 	rcu_read_lock();
512 	f = idr_find(&head->handle_idr, handle);
513 	if (f && !refcount_inc_not_zero(&f->refcnt))
514 		f = NULL;
515 	rcu_read_unlock();
516 
517 	return f;
518 }
519 
520 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
521 		       bool *last, bool rtnl_held,
522 		       struct netlink_ext_ack *extack)
523 {
524 	struct cls_fl_head *head = fl_head_dereference(tp);
525 
526 	*last = false;
527 
528 	spin_lock(&tp->lock);
529 	if (f->deleted) {
530 		spin_unlock(&tp->lock);
531 		return -ENOENT;
532 	}
533 
534 	f->deleted = true;
535 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
536 			       f->mask->filter_ht_params);
537 	idr_remove(&head->handle_idr, f->handle);
538 	list_del_rcu(&f->list);
539 	spin_unlock(&tp->lock);
540 
541 	*last = fl_mask_put(head, f->mask);
542 	if (!tc_skip_hw(f->flags))
543 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
544 	tcf_unbind_filter(tp, &f->res);
545 	__fl_put(f);
546 
547 	return 0;
548 }
549 
550 static void fl_destroy_sleepable(struct work_struct *work)
551 {
552 	struct cls_fl_head *head = container_of(to_rcu_work(work),
553 						struct cls_fl_head,
554 						rwork);
555 
556 	rhashtable_destroy(&head->ht);
557 	kfree(head);
558 	module_put(THIS_MODULE);
559 }
560 
561 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
562 		       struct netlink_ext_ack *extack)
563 {
564 	struct cls_fl_head *head = fl_head_dereference(tp);
565 	struct fl_flow_mask *mask, *next_mask;
566 	struct cls_fl_filter *f, *next;
567 	bool last;
568 
569 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
570 		list_for_each_entry_safe(f, next, &mask->filters, list) {
571 			__fl_delete(tp, f, &last, rtnl_held, extack);
572 			if (last)
573 				break;
574 		}
575 	}
576 	idr_destroy(&head->handle_idr);
577 
578 	__module_get(THIS_MODULE);
579 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
580 }
581 
582 static void fl_put(struct tcf_proto *tp, void *arg)
583 {
584 	struct cls_fl_filter *f = arg;
585 
586 	__fl_put(f);
587 }
588 
589 static void *fl_get(struct tcf_proto *tp, u32 handle)
590 {
591 	struct cls_fl_head *head = fl_head_dereference(tp);
592 
593 	return __fl_get(head, handle);
594 }
595 
596 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
597 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
598 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
599 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
600 					    .len = IFNAMSIZ },
601 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
602 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
603 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
604 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
605 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
606 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
607 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
608 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
609 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
610 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
611 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
612 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
613 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
614 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
615 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
616 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
617 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
618 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
619 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
620 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
621 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
622 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
623 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
624 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
625 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
628 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
629 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
632 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
641 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
642 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
644 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
645 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
646 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
647 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
648 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
649 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
650 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
651 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
652 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
653 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
654 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
655 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
656 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
657 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
658 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
659 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
660 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
661 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
662 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
663 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
667 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
668 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
669 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
671 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
672 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
673 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
674 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
676 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
679 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
681 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
682 	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
683 	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
684 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
685 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
687 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
688 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
689 					    .len = 128 / BITS_PER_BYTE },
690 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
691 					    .len = 128 / BITS_PER_BYTE },
692 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
693 };
694 
695 static const struct nla_policy
696 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
697 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
698 };
699 
700 static const struct nla_policy
701 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
702 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
703 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
704 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
705 						       .len = 128 },
706 };
707 
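/* Copy a value and its mask from the netlink attributes.  If no mask
 * attribute was supplied, the mask defaults to all ones (exact match).
 */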
708 static void fl_set_key_val(struct nlattr **tb,
709 			   void *val, int val_type,
710 			   void *mask, int mask_type, int len)
711 {
712 	if (!tb[val_type])
713 		return;
714 	nla_memcpy(val, tb[val_type], len);
715 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
716 		memset(mask, 0xff, len);
717 	else
718 		nla_memcpy(mask, tb[mask_type], len);
719 }
720 
721 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
722 				 struct fl_flow_key *mask,
723 				 struct netlink_ext_ack *extack)
724 {
725 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
726 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
727 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
728 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
729 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
730 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
731 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
732 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
733 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
734 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
735 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
736 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
737 
738 	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
739 		NL_SET_ERR_MSG(extack,
740 			       "Both min and max destination ports must be specified");
741 		return -EINVAL;
742 	}
743 	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
744 		NL_SET_ERR_MSG(extack,
745 			       "Both min and max source ports must be specified");
746 		return -EINVAL;
747 	}
748 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
749 	    htons(key->tp_range.tp_max.dst) <=
750 	    htons(key->tp_range.tp_min.dst)) {
751 		NL_SET_ERR_MSG_ATTR(extack,
752 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
753 				    "Invalid destination port range (min must be strictly smaller than max)");
754 		return -EINVAL;
755 	}
756 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
757 	    htons(key->tp_range.tp_max.src) <=
758 	    htons(key->tp_range.tp_min.src)) {
759 		NL_SET_ERR_MSG_ATTR(extack,
760 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
761 				    "Invalid source port range (min must be strictly smaller than max)");
762 		return -EINVAL;
763 	}
764 
765 	return 0;
766 }
767 
768 static int fl_set_key_mpls(struct nlattr **tb,
769 			   struct flow_dissector_key_mpls *key_val,
770 			   struct flow_dissector_key_mpls *key_mask)
771 {
772 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
773 		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
774 		key_mask->mpls_ttl = MPLS_TTL_MASK;
775 	}
776 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
777 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
778 
779 		if (bos & ~MPLS_BOS_MASK)
780 			return -EINVAL;
781 		key_val->mpls_bos = bos;
782 		key_mask->mpls_bos = MPLS_BOS_MASK;
783 	}
784 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
785 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
786 
787 		if (tc & ~MPLS_TC_MASK)
788 			return -EINVAL;
789 		key_val->mpls_tc = tc;
790 		key_mask->mpls_tc = MPLS_TC_MASK;
791 	}
792 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
793 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
794 
795 		if (label & ~MPLS_LABEL_MASK)
796 			return -EINVAL;
797 		key_val->mpls_label = label;
798 		key_mask->mpls_label = MPLS_LABEL_MASK;
799 	}
800 	return 0;
801 }
802 
803 static void fl_set_key_vlan(struct nlattr **tb,
804 			    __be16 ethertype,
805 			    int vlan_id_key, int vlan_prio_key,
806 			    int vlan_next_eth_type_key,
807 			    struct flow_dissector_key_vlan *key_val,
808 			    struct flow_dissector_key_vlan *key_mask)
809 {
810 #define VLAN_PRIORITY_MASK	0x7
811 
812 	if (tb[vlan_id_key]) {
813 		key_val->vlan_id =
814 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
815 		key_mask->vlan_id = VLAN_VID_MASK;
816 	}
817 	if (tb[vlan_prio_key]) {
818 		key_val->vlan_priority =
819 			nla_get_u8(tb[vlan_prio_key]) &
820 			VLAN_PRIORITY_MASK;
821 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
822 	}
823 	key_val->vlan_tpid = ethertype;
824 	key_mask->vlan_tpid = cpu_to_be16(~0);
825 	if (tb[vlan_next_eth_type_key]) {
826 		key_val->vlan_eth_type =
827 			nla_get_be16(tb[vlan_next_eth_type_key]);
828 		key_mask->vlan_eth_type = cpu_to_be16(~0);
829 	}
830 }
831 
832 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
833 			    u32 *dissector_key, u32 *dissector_mask,
834 			    u32 flower_flag_bit, u32 dissector_flag_bit)
835 {
836 	if (flower_mask & flower_flag_bit) {
837 		*dissector_mask |= dissector_flag_bit;
838 		if (flower_key & flower_flag_bit)
839 			*dissector_key |= dissector_flag_bit;
840 	}
841 }
842 
843 static int fl_set_key_flags(struct nlattr **tb,
844 			    u32 *flags_key, u32 *flags_mask)
845 {
846 	u32 key, mask;
847 
848 	/* mask is mandatory for flags */
849 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
850 		return -EINVAL;
851 
852 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
853 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
854 
855 	*flags_key  = 0;
856 	*flags_mask = 0;
857 
858 	fl_set_key_flag(key, mask, flags_key, flags_mask,
859 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
860 	fl_set_key_flag(key, mask, flags_key, flags_mask,
861 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
862 			FLOW_DIS_FIRST_FRAG);
863 
864 	return 0;
865 }
866 
867 static void fl_set_key_ip(struct nlattr **tb, bool encap,
868 			  struct flow_dissector_key_ip *key,
869 			  struct flow_dissector_key_ip *mask)
870 {
871 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
872 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
873 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
874 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
875 
876 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
877 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
878 }
879 
880 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
881 			     int depth, int option_len,
882 			     struct netlink_ext_ack *extack)
883 {
884 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
885 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
886 	struct geneve_opt *opt;
887 	int err, data_len = 0;
888 
889 	if (option_len > sizeof(struct geneve_opt))
890 		data_len = option_len - sizeof(struct geneve_opt);
891 
892 	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
893 		return -ERANGE;
894 
895 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
896 	memset(opt, 0xff, option_len);
897 	opt->length = data_len / 4;
898 	opt->r1 = 0;
899 	opt->r2 = 0;
900 	opt->r3 = 0;
901 
902 	/* If no mask has been provided we assume an exact match. */
903 	if (!depth)
904 		return sizeof(struct geneve_opt) + data_len;
905 
906 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
907 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
908 		return -EINVAL;
909 	}
910 
911 	err = nla_parse_nested_deprecated(tb,
912 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
913 					  nla, geneve_opt_policy, extack);
914 	if (err < 0)
915 		return err;
916 
917 	/* We are not allowed to omit any of CLASS, TYPE or DATA
918 	 * fields from the key.
919 	 */
920 	if (!option_len &&
921 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
922 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
923 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
924 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
925 		return -EINVAL;
926 	}
927 
928 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
929 	 * for the mask.
930 	 */
931 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
932 		int new_len = key->enc_opts.len;
933 
934 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
935 		data_len = nla_len(data);
936 		if (data_len < 4) {
937 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
938 			return -ERANGE;
939 		}
940 		if (data_len % 4) {
941 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
942 			return -ERANGE;
943 		}
944 
945 		new_len += sizeof(struct geneve_opt) + data_len;
946 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
947 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
948 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
949 			return -ERANGE;
950 		}
951 		opt->length = data_len / 4;
952 		memcpy(opt->opt_data, nla_data(data), data_len);
953 	}
954 
955 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
956 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
957 		opt->opt_class = nla_get_be16(class);
958 	}
959 
960 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
961 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
962 		opt->type = nla_get_u8(type);
963 	}
964 
965 	return sizeof(struct geneve_opt) + data_len;
966 }
967 
968 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
969 			  struct fl_flow_key *mask,
970 			  struct netlink_ext_ack *extack)
971 {
972 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
973 	int err, option_len, key_depth, msk_depth = 0;
974 
975 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
976 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
977 					     enc_opts_policy, extack);
978 	if (err)
979 		return err;
980 
981 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
982 
983 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
984 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
985 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
986 						     enc_opts_policy, extack);
987 		if (err)
988 			return err;
989 
990 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
991 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
992 	}
993 
994 	nla_for_each_attr(nla_opt_key, nla_enc_key,
995 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
996 		switch (nla_type(nla_opt_key)) {
997 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
998 			option_len = 0;
999 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1000 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1001 						       key_depth, option_len,
1002 						       extack);
1003 			if (option_len < 0)
1004 				return option_len;
1005 
1006 			key->enc_opts.len += option_len;
1007 			/* At the same time we need to parse through the mask
1008 			 * in order to verify exact and mask attribute lengths.
1009 			 */
1010 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1011 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1012 						       msk_depth, option_len,
1013 						       extack);
1014 			if (option_len < 0)
1015 				return option_len;
1016 
1017 			mask->enc_opts.len += option_len;
1018 			if (key->enc_opts.len != mask->enc_opts.len) {
1019 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1020 				return -EINVAL;
1021 			}
1022 
1023 			if (msk_depth)
1024 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1025 			break;
1026 		default:
1027 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1028 			return -EINVAL;
1029 		}
1030 	}
1031 
1032 	return 0;
1033 }
1034 
1035 static int fl_set_key_ct(struct nlattr **tb,
1036 			 struct flow_dissector_key_ct *key,
1037 			 struct flow_dissector_key_ct *mask,
1038 			 struct netlink_ext_ack *extack)
1039 {
1040 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1041 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1042 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1043 			return -EOPNOTSUPP;
1044 		}
1045 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1046 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1047 			       sizeof(key->ct_state));
1048 	}
1049 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1050 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1051 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1052 			return -EOPNOTSUPP;
1053 		}
1054 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1055 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1056 			       sizeof(key->ct_zone));
1057 	}
1058 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1059 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1060 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1061 			return -EOPNOTSUPP;
1062 		}
1063 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1064 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1065 			       sizeof(key->ct_mark));
1066 	}
1067 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1068 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1069 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1070 			return -EOPNOTSUPP;
1071 		}
1072 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1073 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1074 			       sizeof(key->ct_labels));
1075 	}
1076 
1077 	return 0;
1078 }
1079 
1080 static int fl_set_key(struct net *net, struct nlattr **tb,
1081 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1082 		      struct netlink_ext_ack *extack)
1083 {
1084 	__be16 ethertype;
1085 	int ret = 0;
1086 
1087 	if (tb[TCA_FLOWER_INDEV]) {
1088 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1089 		if (err < 0)
1090 			return err;
1091 		key->meta.ingress_ifindex = err;
1092 		mask->meta.ingress_ifindex = 0xffffffff;
1093 	}
1094 
1095 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1096 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1097 		       sizeof(key->eth.dst));
1098 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1099 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1100 		       sizeof(key->eth.src));
1101 
1102 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1103 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1104 
1105 		if (eth_type_vlan(ethertype)) {
1106 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1107 					TCA_FLOWER_KEY_VLAN_PRIO,
1108 					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1109 					&key->vlan, &mask->vlan);
1110 
1111 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1112 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1113 				if (eth_type_vlan(ethertype)) {
1114 					fl_set_key_vlan(tb, ethertype,
1115 							TCA_FLOWER_KEY_CVLAN_ID,
1116 							TCA_FLOWER_KEY_CVLAN_PRIO,
1117 							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1118 							&key->cvlan, &mask->cvlan);
1119 					fl_set_key_val(tb, &key->basic.n_proto,
1120 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1121 						       &mask->basic.n_proto,
1122 						       TCA_FLOWER_UNSPEC,
1123 						       sizeof(key->basic.n_proto));
1124 				} else {
1125 					key->basic.n_proto = ethertype;
1126 					mask->basic.n_proto = cpu_to_be16(~0);
1127 				}
1128 			}
1129 		} else {
1130 			key->basic.n_proto = ethertype;
1131 			mask->basic.n_proto = cpu_to_be16(~0);
1132 		}
1133 	}
1134 
1135 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1136 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1137 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1138 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1139 			       sizeof(key->basic.ip_proto));
1140 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1141 	}
1142 
1143 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1144 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1145 		mask->control.addr_type = ~0;
1146 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1147 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1148 			       sizeof(key->ipv4.src));
1149 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1150 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1151 			       sizeof(key->ipv4.dst));
1152 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1153 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1154 		mask->control.addr_type = ~0;
1155 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1156 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1157 			       sizeof(key->ipv6.src));
1158 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1159 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1160 			       sizeof(key->ipv6.dst));
1161 	}
1162 
1163 	if (key->basic.ip_proto == IPPROTO_TCP) {
1164 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1165 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1166 			       sizeof(key->tp.src));
1167 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1168 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1169 			       sizeof(key->tp.dst));
1170 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1171 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1172 			       sizeof(key->tcp.flags));
1173 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1174 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1175 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1176 			       sizeof(key->tp.src));
1177 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1178 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1179 			       sizeof(key->tp.dst));
1180 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1181 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1182 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1183 			       sizeof(key->tp.src));
1184 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1185 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1186 			       sizeof(key->tp.dst));
1187 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1188 		   key->basic.ip_proto == IPPROTO_ICMP) {
1189 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1190 			       &mask->icmp.type,
1191 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1192 			       sizeof(key->icmp.type));
1193 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1194 			       &mask->icmp.code,
1195 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1196 			       sizeof(key->icmp.code));
1197 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1198 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1199 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1200 			       &mask->icmp.type,
1201 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1202 			       sizeof(key->icmp.type));
1203 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1204 			       &mask->icmp.code,
1205 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1206 			       sizeof(key->icmp.code));
1207 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1208 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1209 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1210 		if (ret)
1211 			return ret;
1212 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1213 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1214 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1215 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1216 			       sizeof(key->arp.sip));
1217 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1218 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1219 			       sizeof(key->arp.tip));
1220 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1221 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1222 			       sizeof(key->arp.op));
1223 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1224 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1225 			       sizeof(key->arp.sha));
1226 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1227 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1228 			       sizeof(key->arp.tha));
1229 	}
1230 
1231 	if (key->basic.ip_proto == IPPROTO_TCP ||
1232 	    key->basic.ip_proto == IPPROTO_UDP ||
1233 	    key->basic.ip_proto == IPPROTO_SCTP) {
1234 		ret = fl_set_key_port_range(tb, key, mask, extack);
1235 		if (ret)
1236 			return ret;
1237 	}
1238 
1239 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1240 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1241 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1242 		mask->enc_control.addr_type = ~0;
1243 		fl_set_key_val(tb, &key->enc_ipv4.src,
1244 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1245 			       &mask->enc_ipv4.src,
1246 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1247 			       sizeof(key->enc_ipv4.src));
1248 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1249 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1250 			       &mask->enc_ipv4.dst,
1251 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1252 			       sizeof(key->enc_ipv4.dst));
1253 	}
1254 
1255 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1256 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1257 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1258 		mask->enc_control.addr_type = ~0;
1259 		fl_set_key_val(tb, &key->enc_ipv6.src,
1260 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1261 			       &mask->enc_ipv6.src,
1262 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1263 			       sizeof(key->enc_ipv6.src));
1264 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1265 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1266 			       &mask->enc_ipv6.dst,
1267 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1268 			       sizeof(key->enc_ipv6.dst));
1269 	}
1270 
1271 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1272 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1273 		       sizeof(key->enc_key_id.keyid));
1274 
1275 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1276 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1277 		       sizeof(key->enc_tp.src));
1278 
1279 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1280 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1281 		       sizeof(key->enc_tp.dst));
1282 
1283 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1284 
1285 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1286 		ret = fl_set_enc_opt(tb, key, mask, extack);
1287 		if (ret)
1288 			return ret;
1289 	}
1290 
1291 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1292 	if (ret)
1293 		return ret;
1294 
1295 	if (tb[TCA_FLOWER_KEY_FLAGS])
1296 		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1297 
1298 	return ret;
1299 }
1300 
1301 static void fl_mask_copy(struct fl_flow_mask *dst,
1302 			 struct fl_flow_mask *src)
1303 {
1304 	const void *psrc = fl_key_get_start(&src->key, src);
1305 	void *pdst = fl_key_get_start(&dst->key, src);
1306 
1307 	memcpy(pdst, psrc, fl_mask_range(src));
1308 	dst->range = src->range;
1309 }
1310 
1311 static const struct rhashtable_params fl_ht_params = {
1312 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1313 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1314 	.automatic_shrinking = true,
1315 };
1316 
1317 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1318 {
1319 	mask->filter_ht_params = fl_ht_params;
1320 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1321 	mask->filter_ht_params.key_offset += mask->range.start;
1322 
1323 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1324 }
1325 
1326 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1327 #define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
1328 
1329 #define FL_KEY_IS_MASKED(mask, member)						\
1330 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1331 		   0, FL_KEY_MEMBER_SIZE(member))				\
1332 
1333 #define FL_KEY_SET(keys, cnt, id, member)					\
1334 	do {									\
1335 		keys[cnt].key_id = id;						\
1336 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1337 		cnt++;								\
1338 	} while(0);
1339 
1340 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1341 	do {									\
1342 		if (FL_KEY_IS_MASKED(mask, member))				\
1343 			FL_KEY_SET(keys, cnt, id, member);			\
1344 	} while(0);
1345 
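/* Build the flow_dissector for a mask: CONTROL and BASIC are always
 * dissected, any other key is included only if the corresponding part of
 * the mask is non-zero.
 */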
1346 static void fl_init_dissector(struct flow_dissector *dissector,
1347 			      struct fl_flow_key *mask)
1348 {
1349 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1350 	size_t cnt = 0;
1351 
1352 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1353 			     FLOW_DISSECTOR_KEY_META, meta);
1354 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1355 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1356 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1357 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1358 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1359 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1360 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1361 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1362 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1363 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1364 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1365 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1366 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1367 			     FLOW_DISSECTOR_KEY_IP, ip);
1368 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1369 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1370 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1371 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1372 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1373 			     FLOW_DISSECTOR_KEY_ARP, arp);
1374 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1375 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1376 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1377 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1378 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1379 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1380 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1381 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1382 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1383 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1384 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1385 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1386 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1387 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1388 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1389 			   enc_control);
1390 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1391 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1392 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1393 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1394 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1395 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1396 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1397 			     FLOW_DISSECTOR_KEY_CT, ct);
1398 
1399 	skb_flow_dissector_init(dissector, keys, cnt);
1400 }
1401 
1402 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1403 					       struct fl_flow_mask *mask)
1404 {
1405 	struct fl_flow_mask *newmask;
1406 	int err;
1407 
1408 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1409 	if (!newmask)
1410 		return ERR_PTR(-ENOMEM);
1411 
1412 	fl_mask_copy(newmask, mask);
1413 
1414 	if ((newmask->key.tp_range.tp_min.dst &&
1415 	     newmask->key.tp_range.tp_max.dst) ||
1416 	    (newmask->key.tp_range.tp_min.src &&
1417 	     newmask->key.tp_range.tp_max.src))
1418 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1419 
1420 	err = fl_init_mask_hashtable(newmask);
1421 	if (err)
1422 		goto errout_free;
1423 
1424 	fl_init_dissector(&newmask->dissector, &newmask->key);
1425 
1426 	INIT_LIST_HEAD_RCU(&newmask->filters);
1427 
1428 	refcount_set(&newmask->refcnt, 1);
1429 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1430 				      &newmask->ht_node, mask_ht_params);
1431 	if (err)
1432 		goto errout_destroy;
1433 
1434 	spin_lock(&head->masks_lock);
1435 	list_add_tail_rcu(&newmask->list, &head->masks);
1436 	spin_unlock(&head->masks_lock);
1437 
1438 	return newmask;
1439 
1440 errout_destroy:
1441 	rhashtable_destroy(&newmask->ht);
1442 errout_free:
1443 	kfree(newmask);
1444 
1445 	return ERR_PTR(err);
1446 }
1447 
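/* Find an existing mask identical to the new filter's mask and take a
 * reference on it, or create a new shared mask.  The caller's mask is
 * first inserted as a temporary node so that concurrent creation of the
 * same mask fails with -EAGAIN and is retried.
 */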
1448 static int fl_check_assign_mask(struct cls_fl_head *head,
1449 				struct cls_fl_filter *fnew,
1450 				struct cls_fl_filter *fold,
1451 				struct fl_flow_mask *mask)
1452 {
1453 	struct fl_flow_mask *newmask;
1454 	int ret = 0;
1455 
1456 	rcu_read_lock();
1457 
1458 	/* Insert mask as temporary node to prevent concurrent creation of mask
1459 	 * with same key. Any concurrent lookups with same key will return
1460 	 * -EAGAIN because mask's refcnt is zero.
1461 	 */
1462 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1463 						       &mask->ht_node,
1464 						       mask_ht_params);
1465 	if (!fnew->mask) {
1466 		rcu_read_unlock();
1467 
1468 		if (fold) {
1469 			ret = -EINVAL;
1470 			goto errout_cleanup;
1471 		}
1472 
1473 		newmask = fl_create_new_mask(head, mask);
1474 		if (IS_ERR(newmask)) {
1475 			ret = PTR_ERR(newmask);
1476 			goto errout_cleanup;
1477 		}
1478 
1479 		fnew->mask = newmask;
1480 		return 0;
1481 	} else if (IS_ERR(fnew->mask)) {
1482 		ret = PTR_ERR(fnew->mask);
1483 	} else if (fold && fold->mask != fnew->mask) {
1484 		ret = -EINVAL;
1485 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1486 		/* Mask was deleted concurrently, try again */
1487 		ret = -EAGAIN;
1488 	}
1489 	rcu_read_unlock();
1490 	return ret;
1491 
1492 errout_cleanup:
1493 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1494 			       mask_ht_params);
1495 	return ret;
1496 }
1497 
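/* Validate the filter's actions and netlink attributes, bind the classid
 * (taking rtnl only when it is not already held), fill in key, mask and the
 * masked key, and reject masks that do not fit the chain template.
 */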
1498 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1499 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1500 			unsigned long base, struct nlattr **tb,
1501 			struct nlattr *est, bool ovr,
1502 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1503 			struct netlink_ext_ack *extack)
1504 {
1505 	int err;
1506 
1507 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1508 				extack);
1509 	if (err < 0)
1510 		return err;
1511 
1512 	if (tb[TCA_FLOWER_CLASSID]) {
1513 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1514 		if (!rtnl_held)
1515 			rtnl_lock();
1516 		tcf_bind_filter(tp, &f->res, base);
1517 		if (!rtnl_held)
1518 			rtnl_unlock();
1519 	}
1520 
1521 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1522 	if (err)
1523 		return err;
1524 
1525 	fl_mask_update_range(mask);
1526 	fl_set_masked_key(&f->mkey, &f->key, mask);
1527 
1528 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1529 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1530 		return -EINVAL;
1531 	}
1532 
1533 	return 0;
1534 }
1535 
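/* Insert the new filter into its mask's hash table. -EEXIST is tolerated when
 * an old filter (fold) is being overwritten, because the old entry with the
 * same key is removed later in fl_change(); *in_ht tells the caller whether
 * the insert actually happened so error unwinding can undo it.
 */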
1536 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1537 			       struct cls_fl_filter *fold,
1538 			       bool *in_ht)
1539 {
1540 	struct fl_flow_mask *mask = fnew->mask;
1541 	int err;
1542 
1543 	err = rhashtable_lookup_insert_fast(&mask->ht,
1544 					    &fnew->ht_node,
1545 					    mask->filter_ht_params);
1546 	if (err) {
1547 		*in_ht = false;
1548 		/* It is okay if filter with same key exists when
1549 		 * overwriting.
1550 		 */
1551 		return fold && err == -EEXIST ? 0 : err;
1552 	}
1553 
1554 	*in_ht = true;
1555 	return 0;
1556 }
1557 
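/* Create a new filter or update an existing one. The classifier is unlocked
 * (TCF_PROTO_OPS_DOIT_UNLOCKED), so rtnl may not be held and tp->lock guards
 * the final publish step: the new filter is linked into the handle IDR and
 * the mask's filter list only after verifying that neither tp nor the old
 * filter was deleted concurrently; -EAGAIN tells cls_api to retry.
 */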
1558 static int fl_change(struct net *net, struct sk_buff *in_skb,
1559 		     struct tcf_proto *tp, unsigned long base,
1560 		     u32 handle, struct nlattr **tca,
1561 		     void **arg, bool ovr, bool rtnl_held,
1562 		     struct netlink_ext_ack *extack)
1563 {
1564 	struct cls_fl_head *head = fl_head_dereference(tp);
1565 	struct cls_fl_filter *fold = *arg;
1566 	struct cls_fl_filter *fnew;
1567 	struct fl_flow_mask *mask;
1568 	struct nlattr **tb;
1569 	bool in_ht;
1570 	int err;
1571 
1572 	if (!tca[TCA_OPTIONS]) {
1573 		err = -EINVAL;
1574 		goto errout_fold;
1575 	}
1576 
1577 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1578 	if (!mask) {
1579 		err = -ENOBUFS;
1580 		goto errout_fold;
1581 	}
1582 
1583 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1584 	if (!tb) {
1585 		err = -ENOBUFS;
1586 		goto errout_mask_alloc;
1587 	}
1588 
1589 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1590 					  tca[TCA_OPTIONS], fl_policy, NULL);
1591 	if (err < 0)
1592 		goto errout_tb;
1593 
1594 	if (fold && handle && fold->handle != handle) {
1595 		err = -EINVAL;
1596 		goto errout_tb;
1597 	}
1598 
1599 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1600 	if (!fnew) {
1601 		err = -ENOBUFS;
1602 		goto errout_tb;
1603 	}
1604 	INIT_LIST_HEAD(&fnew->hw_list);
1605 	refcount_set(&fnew->refcnt, 1);
1606 
1607 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1608 	if (err < 0)
1609 		goto errout;
1610 
1611 	if (tb[TCA_FLOWER_FLAGS]) {
1612 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1613 
1614 		if (!tc_flags_valid(fnew->flags)) {
1615 			err = -EINVAL;
1616 			goto errout;
1617 		}
1618 	}
1619 
1620 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1621 			   tp->chain->tmplt_priv, rtnl_held, extack);
1622 	if (err)
1623 		goto errout;
1624 
1625 	err = fl_check_assign_mask(head, fnew, fold, mask);
1626 	if (err)
1627 		goto errout;
1628 
1629 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1630 	if (err)
1631 		goto errout_mask;
1632 
1633 	if (!tc_skip_hw(fnew->flags)) {
1634 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1635 		if (err)
1636 			goto errout_ht;
1637 	}
1638 
1639 	if (!tc_in_hw(fnew->flags))
1640 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1641 
1642 	spin_lock(&tp->lock);
1643 
1644 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1645 	 * proto again or create new one, if necessary.
1646 	 */
1647 	if (tp->deleting) {
1648 		err = -EAGAIN;
1649 		goto errout_hw;
1650 	}
1651 
1652 	if (fold) {
1653 		/* Fold filter was deleted concurrently. Retry lookup. */
1654 		if (fold->deleted) {
1655 			err = -EAGAIN;
1656 			goto errout_hw;
1657 		}
1658 
1659 		fnew->handle = handle;
1660 
1661 		if (!in_ht) {
1662 			struct rhashtable_params params =
1663 				fnew->mask->filter_ht_params;
1664 
1665 			err = rhashtable_insert_fast(&fnew->mask->ht,
1666 						     &fnew->ht_node,
1667 						     params);
1668 			if (err)
1669 				goto errout_hw;
1670 			in_ht = true;
1671 		}
1672 
1673 		refcount_inc(&fnew->refcnt);
1674 		rhashtable_remove_fast(&fold->mask->ht,
1675 				       &fold->ht_node,
1676 				       fold->mask->filter_ht_params);
1677 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1678 		list_replace_rcu(&fold->list, &fnew->list);
1679 		fold->deleted = true;
1680 
1681 		spin_unlock(&tp->lock);
1682 
1683 		fl_mask_put(head, fold->mask);
1684 		if (!tc_skip_hw(fold->flags))
1685 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1686 		tcf_unbind_filter(tp, &fold->res);
1687 		/* Caller holds reference to fold, so refcnt is always > 0
1688 		 * after this.
1689 		 */
1690 		refcount_dec(&fold->refcnt);
1691 		__fl_put(fold);
1692 	} else {
1693 		if (handle) {
1694 			/* user specifies a handle and it doesn't exist */
1695 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1696 					    handle, GFP_ATOMIC);
1697 
1698 			/* Filter with specified handle was concurrently
1699 			 * inserted after initial check in cls_api. This is not
1700 			 * necessarily an error if NLM_F_EXCL is not set in
1701 			 * message flags. Returning EAGAIN will cause cls_api to
1702 			 * try to update concurrently inserted rule.
1703 			 */
1704 			if (err == -ENOSPC)
1705 				err = -EAGAIN;
1706 		} else {
1707 			handle = 1;
1708 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1709 					    INT_MAX, GFP_ATOMIC);
1710 		}
1711 		if (err)
1712 			goto errout_hw;
1713 
1714 		refcount_inc(&fnew->refcnt);
1715 		fnew->handle = handle;
1716 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1717 		spin_unlock(&tp->lock);
1718 	}
1719 
1720 	*arg = fnew;
1721 
1722 	kfree(tb);
1723 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1724 	return 0;
1725 
1726 errout_ht:
1727 	spin_lock(&tp->lock);
1728 errout_hw:
1729 	fnew->deleted = true;
1730 	spin_unlock(&tp->lock);
1731 	if (!tc_skip_hw(fnew->flags))
1732 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1733 	if (in_ht)
1734 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1735 				       fnew->mask->filter_ht_params);
1736 errout_mask:
1737 	fl_mask_put(head, fnew->mask);
1738 errout:
1739 	__fl_put(fnew);
1740 errout_tb:
1741 	kfree(tb);
1742 errout_mask_alloc:
1743 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1744 errout_fold:
1745 	if (fold)
1746 		__fl_put(fold);
1747 	return err;
1748 }
1749 
1750 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1751 		     bool rtnl_held, struct netlink_ext_ack *extack)
1752 {
1753 	struct cls_fl_head *head = fl_head_dereference(tp);
1754 	struct cls_fl_filter *f = arg;
1755 	bool last_on_mask;
1756 	int err = 0;
1757 
1758 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1759 	*last = list_empty(&head->masks);
1760 	__fl_put(f);
1761 
1762 	return err;
1763 }
1764 
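/* Walk all filters via the handle IDR under RCU, dropping the RCU lock around
 * the callback. A reference is taken on each filter so it cannot be freed
 * while arg->fn() runs; filters whose refcnt already dropped to zero are
 * being deleted and are skipped.
 */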
1765 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1766 		    bool rtnl_held)
1767 {
1768 	struct cls_fl_head *head = fl_head_dereference(tp);
1769 	unsigned long id = arg->cookie, tmp;
1770 	struct cls_fl_filter *f;
1771 
1772 	arg->count = arg->skip;
1773 
1774 	rcu_read_lock();
1775 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1776 		/* don't return filters that are being deleted */
1777 		if (!refcount_inc_not_zero(&f->refcnt))
1778 			continue;
1779 		rcu_read_unlock();
1780 
1781 		if (arg->fn(tp, f, arg) < 0) {
1782 			__fl_put(f);
1783 			arg->stop = 1;
1784 			rcu_read_lock();
1785 			break;
1786 		}
1787 		__fl_put(f);
1788 		arg->count++;
1789 		rcu_read_lock();
1790 	}
1791 	rcu_read_unlock();
1792 	arg->cookie = id;
1793 }
1794 
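/* Return the next filter on head->hw_filters after 'f', taking a reference on
 * it under tp->lock. When adding offloads, filters already marked deleted are
 * skipped; NULL means the end of the list was reached.
 */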
1795 static struct cls_fl_filter *
1796 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1797 {
1798 	struct cls_fl_head *head = fl_head_dereference(tp);
1799 
1800 	spin_lock(&tp->lock);
1801 	if (list_empty(&head->hw_filters)) {
1802 		spin_unlock(&tp->lock);
1803 		return NULL;
1804 	}
1805 
1806 	if (!f)
1807 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1808 			       hw_list);
1809 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1810 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1811 			spin_unlock(&tp->lock);
1812 			return f;
1813 		}
1814 	}
1815 
1816 	spin_unlock(&tp->lock);
1817 	return NULL;
1818 }
1819 
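/* Replay (add) or remove all hardware-offloaded filters through the given
 * block callback. Each filter's match and actions are re-packed into a
 * flow_cls_offload; a failure to set up the flow actions aborts the walk only
 * for skip_sw filters, which cannot fall back to software.
 */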
1820 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1821 			void *cb_priv, struct netlink_ext_ack *extack)
1822 {
1823 	struct tcf_block *block = tp->chain->block;
1824 	struct flow_cls_offload cls_flower = {};
1825 	struct cls_fl_filter *f = NULL;
1826 	int err;
1827 
1828 	/* hw_filters list can only be changed by hw offload functions after
1829 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1830 	 * iterating it.
1831 	 */
1832 	ASSERT_RTNL();
1833 
1834 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1835 		cls_flower.rule =
1836 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1837 		if (!cls_flower.rule) {
1838 			__fl_put(f);
1839 			return -ENOMEM;
1840 		}
1841 
1842 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1843 					   extack);
1844 		cls_flower.command = add ?
1845 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1846 		cls_flower.cookie = (unsigned long)f;
1847 		cls_flower.rule->match.dissector = &f->mask->dissector;
1848 		cls_flower.rule->match.mask = &f->mask->key;
1849 		cls_flower.rule->match.key = &f->mkey;
1850 
1851 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
1852 					   true);
1853 		if (err) {
1854 			kfree(cls_flower.rule);
1855 			if (tc_skip_sw(f->flags)) {
1856 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1857 				__fl_put(f);
1858 				return err;
1859 			}
1860 			goto next_flow;
1861 		}
1862 
1863 		cls_flower.classid = f->res.classid;
1864 
1865 		err = tc_setup_cb_reoffload(block, tp, add, cb,
1866 					    TC_SETUP_CLSFLOWER, &cls_flower,
1867 					    cb_priv, &f->flags,
1868 					    &f->in_hw_count);
1869 		tc_cleanup_flow_action(&cls_flower.rule->action);
1870 		kfree(cls_flower.rule);
1871 
1872 		if (err) {
1873 			__fl_put(f);
1874 			return err;
1875 		}
1876 next_flow:
1877 		__fl_put(f);
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
1884 {
1885 	struct flow_cls_offload *cls_flower = type_data;
1886 	struct cls_fl_filter *f =
1887 		(struct cls_fl_filter *) cls_flower->cookie;
1888 	struct cls_fl_head *head = fl_head_dereference(tp);
1889 
1890 	spin_lock(&tp->lock);
1891 	list_add(&f->hw_list, &head->hw_filters);
1892 	spin_unlock(&tp->lock);
1893 }
1894 
1895 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
1896 {
1897 	struct flow_cls_offload *cls_flower = type_data;
1898 	struct cls_fl_filter *f =
1899 		(struct cls_fl_filter *) cls_flower->cookie;
1900 
1901 	spin_lock(&tp->lock);
1902 	if (!list_empty(&f->hw_list))
1903 		list_del_init(&f->hw_list);
1904 	spin_unlock(&tp->lock);
1905 }
1906 
1907 static int fl_hw_create_tmplt(struct tcf_chain *chain,
1908 			      struct fl_flow_tmplt *tmplt)
1909 {
1910 	struct flow_cls_offload cls_flower = {};
1911 	struct tcf_block *block = chain->block;
1912 
1913 	cls_flower.rule = flow_rule_alloc(0);
1914 	if (!cls_flower.rule)
1915 		return -ENOMEM;
1916 
1917 	cls_flower.common.chain_index = chain->index;
1918 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
1919 	cls_flower.cookie = (unsigned long) tmplt;
1920 	cls_flower.rule->match.dissector = &tmplt->dissector;
1921 	cls_flower.rule->match.mask = &tmplt->mask;
1922 	cls_flower.rule->match.key = &tmplt->dummy_key;
1923 
1924 	/* We don't care if driver (any of them) fails to handle this
1925 	 * call. It serves just as a hint for it.
1926 	 */
1927 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1928 	kfree(cls_flower.rule);
1929 
1930 	return 0;
1931 }
1932 
1933 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1934 				struct fl_flow_tmplt *tmplt)
1935 {
1936 	struct flow_cls_offload cls_flower = {};
1937 	struct tcf_block *block = chain->block;
1938 
1939 	cls_flower.common.chain_index = chain->index;
1940 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
1941 	cls_flower.cookie = (unsigned long) tmplt;
1942 
1943 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1944 }
1945 
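/* Create a chain template: the dummy key/mask pair is parsed and dissected
 * like a normal filter and offered to drivers via FLOW_CLS_TMPLT_CREATE as a
 * hint of which fields future filters on this chain may match on.
 */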
1946 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1947 			     struct nlattr **tca,
1948 			     struct netlink_ext_ack *extack)
1949 {
1950 	struct fl_flow_tmplt *tmplt;
1951 	struct nlattr **tb;
1952 	int err;
1953 
1954 	if (!tca[TCA_OPTIONS])
1955 		return ERR_PTR(-EINVAL);
1956 
1957 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1958 	if (!tb)
1959 		return ERR_PTR(-ENOBUFS);
1960 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1961 					  tca[TCA_OPTIONS], fl_policy, NULL);
1962 	if (err)
1963 		goto errout_tb;
1964 
1965 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1966 	if (!tmplt) {
1967 		err = -ENOMEM;
1968 		goto errout_tb;
1969 	}
1970 	tmplt->chain = chain;
1971 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1972 	if (err)
1973 		goto errout_tmplt;
1974 
1975 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1976 
1977 	err = fl_hw_create_tmplt(chain, tmplt);
1978 	if (err)
1979 		goto errout_tmplt;
1980 
1981 	kfree(tb);
1982 	return tmplt;
1983 
1984 errout_tmplt:
1985 	kfree(tmplt);
1986 errout_tb:
1987 	kfree(tb);
1988 	return ERR_PTR(err);
1989 }
1990 
1991 static void fl_tmplt_destroy(void *tmplt_priv)
1992 {
1993 	struct fl_flow_tmplt *tmplt = tmplt_priv;
1994 
1995 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1996 	kfree(tmplt);
1997 }
1998 
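/* Dump one key/mask pair as netlink attributes. Nothing is emitted when the
 * mask is all zeroes, and the mask attribute itself is skipped when mask_type
 * is TCA_FLOWER_UNSPEC (as done for basic.n_proto in fl_dump_key()).
 */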
1999 static int fl_dump_key_val(struct sk_buff *skb,
2000 			   void *val, int val_type,
2001 			   void *mask, int mask_type, int len)
2002 {
2003 	int err;
2004 
2005 	if (!memchr_inv(mask, 0, len))
2006 		return 0;
2007 	err = nla_put(skb, val_type, len, val);
2008 	if (err)
2009 		return err;
2010 	if (mask_type != TCA_FLOWER_UNSPEC) {
2011 		err = nla_put(skb, mask_type, len, mask);
2012 		if (err)
2013 			return err;
2014 	}
2015 	return 0;
2016 }
2017 
2018 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2019 				  struct fl_flow_key *mask)
2020 {
2021 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2022 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2023 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2024 			    sizeof(key->tp_range.tp_min.dst)) ||
2025 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2026 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2027 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2028 			    sizeof(key->tp_range.tp_max.dst)) ||
2029 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2030 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2031 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2032 			    sizeof(key->tp_range.tp_min.src)) ||
2033 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2034 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2035 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2036 			    sizeof(key->tp_range.tp_max.src)))
2037 		return -1;
2038 
2039 	return 0;
2040 }
2041 
2042 static int fl_dump_key_mpls(struct sk_buff *skb,
2043 			    struct flow_dissector_key_mpls *mpls_key,
2044 			    struct flow_dissector_key_mpls *mpls_mask)
2045 {
2046 	int err;
2047 
2048 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2049 		return 0;
2050 	if (mpls_mask->mpls_ttl) {
2051 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2052 				 mpls_key->mpls_ttl);
2053 		if (err)
2054 			return err;
2055 	}
2056 	if (mpls_mask->mpls_tc) {
2057 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2058 				 mpls_key->mpls_tc);
2059 		if (err)
2060 			return err;
2061 	}
2062 	if (mpls_mask->mpls_label) {
2063 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2064 				  mpls_key->mpls_label);
2065 		if (err)
2066 			return err;
2067 	}
2068 	if (mpls_mask->mpls_bos) {
2069 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2070 				 mpls_key->mpls_bos);
2071 		if (err)
2072 			return err;
2073 	}
2074 	return 0;
2075 }
2076 
2077 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2078 			  struct flow_dissector_key_ip *key,
2079 			  struct flow_dissector_key_ip *mask)
2080 {
2081 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2082 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2083 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2084 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2085 
2086 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2087 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2088 		return -1;
2089 
2090 	return 0;
2091 }
2092 
2093 static int fl_dump_key_vlan(struct sk_buff *skb,
2094 			    int vlan_id_key, int vlan_prio_key,
2095 			    struct flow_dissector_key_vlan *vlan_key,
2096 			    struct flow_dissector_key_vlan *vlan_mask)
2097 {
2098 	int err;
2099 
2100 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2101 		return 0;
2102 	if (vlan_mask->vlan_id) {
2103 		err = nla_put_u16(skb, vlan_id_key,
2104 				  vlan_key->vlan_id);
2105 		if (err)
2106 			return err;
2107 	}
2108 	if (vlan_mask->vlan_priority) {
2109 		err = nla_put_u8(skb, vlan_prio_key,
2110 				 vlan_key->vlan_priority);
2111 		if (err)
2112 			return err;
2113 	}
2114 	return 0;
2115 }
2116 
2117 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2118 			    u32 *flower_key, u32 *flower_mask,
2119 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2120 {
2121 	if (dissector_mask & dissector_flag_bit) {
2122 		*flower_mask |= flower_flag_bit;
2123 		if (dissector_key & dissector_flag_bit)
2124 			*flower_key |= flower_flag_bit;
2125 	}
2126 }
2127 
2128 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2129 {
2130 	u32 key, mask;
2131 	__be32 _key, _mask;
2132 	int err;
2133 
2134 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2135 		return 0;
2136 
2137 	key = 0;
2138 	mask = 0;
2139 
2140 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2141 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2142 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2143 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2144 			FLOW_DIS_FIRST_FRAG);
2145 
2146 	_key = cpu_to_be32(key);
2147 	_mask = cpu_to_be32(mask);
2148 
2149 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2150 	if (err)
2151 		return err;
2152 
2153 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2154 }
2155 
2156 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2157 				  struct flow_dissector_key_enc_opts *enc_opts)
2158 {
2159 	struct geneve_opt *opt;
2160 	struct nlattr *nest;
2161 	int opt_off = 0;
2162 
2163 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2164 	if (!nest)
2165 		goto nla_put_failure;
2166 
2167 	while (enc_opts->len > opt_off) {
2168 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2169 
2170 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2171 				 opt->opt_class))
2172 			goto nla_put_failure;
2173 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2174 			       opt->type))
2175 			goto nla_put_failure;
2176 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2177 			    opt->length * 4, opt->opt_data))
2178 			goto nla_put_failure;
2179 
2180 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2181 	}
2182 	nla_nest_end(skb, nest);
2183 	return 0;
2184 
2185 nla_put_failure:
2186 	nla_nest_cancel(skb, nest);
2187 	return -EMSGSIZE;
2188 }
2189 
2190 static int fl_dump_key_ct(struct sk_buff *skb,
2191 			  struct flow_dissector_key_ct *key,
2192 			  struct flow_dissector_key_ct *mask)
2193 {
2194 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2195 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2196 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2197 			    sizeof(key->ct_state)))
2198 		goto nla_put_failure;
2199 
2200 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2201 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2202 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2203 			    sizeof(key->ct_zone)))
2204 		goto nla_put_failure;
2205 
2206 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2207 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2208 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2209 			    sizeof(key->ct_mark)))
2210 		goto nla_put_failure;
2211 
2212 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2213 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2214 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2215 			    sizeof(key->ct_labels)))
2216 		goto nla_put_failure;
2217 
2218 	return 0;
2219 
2220 nla_put_failure:
2221 	return -EMSGSIZE;
2222 }
2223 
2224 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2225 			       struct flow_dissector_key_enc_opts *enc_opts)
2226 {
2227 	struct nlattr *nest;
2228 	int err;
2229 
2230 	if (!enc_opts->len)
2231 		return 0;
2232 
2233 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2234 	if (!nest)
2235 		goto nla_put_failure;
2236 
2237 	switch (enc_opts->dst_opt_type) {
2238 	case TUNNEL_GENEVE_OPT:
2239 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2240 		if (err)
2241 			goto nla_put_failure;
2242 		break;
2243 	default:
2244 		goto nla_put_failure;
2245 	}
2246 	nla_nest_end(skb, nest);
2247 	return 0;
2248 
2249 nla_put_failure:
2250 	nla_nest_cancel(skb, nest);
2251 	return -EMSGSIZE;
2252 }
2253 
2254 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2255 			       struct flow_dissector_key_enc_opts *key_opts,
2256 			       struct flow_dissector_key_enc_opts *msk_opts)
2257 {
2258 	int err;
2259 
2260 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2261 	if (err)
2262 		return err;
2263 
2264 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2265 }
2266 
2267 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2268 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2269 {
2270 	if (mask->meta.ingress_ifindex) {
2271 		struct net_device *dev;
2272 
2273 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2274 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2275 			goto nla_put_failure;
2276 	}
2277 
2278 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2279 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2280 			    sizeof(key->eth.dst)) ||
2281 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2282 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2283 			    sizeof(key->eth.src)) ||
2284 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2285 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2286 			    sizeof(key->basic.n_proto)))
2287 		goto nla_put_failure;
2288 
2289 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2290 		goto nla_put_failure;
2291 
2292 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2293 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2294 		goto nla_put_failure;
2295 
2296 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2297 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2298 			     &key->cvlan, &mask->cvlan) ||
2299 	    (mask->cvlan.vlan_tpid &&
2300 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2301 			  key->cvlan.vlan_tpid)))
2302 		goto nla_put_failure;
2303 
2304 	if (mask->basic.n_proto) {
2305 		if (mask->cvlan.vlan_eth_type) {
2306 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2307 					 key->basic.n_proto))
2308 				goto nla_put_failure;
2309 		} else if (mask->vlan.vlan_eth_type) {
2310 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2311 					 key->vlan.vlan_eth_type))
2312 				goto nla_put_failure;
2313 		}
2314 	}
2315 
2316 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2317 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2318 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2319 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2320 			    sizeof(key->basic.ip_proto)) ||
2321 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2322 		goto nla_put_failure;
2323 
2324 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2325 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2326 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2327 			     sizeof(key->ipv4.src)) ||
2328 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2329 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2330 			     sizeof(key->ipv4.dst))))
2331 		goto nla_put_failure;
2332 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2333 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2334 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2335 				  sizeof(key->ipv6.src)) ||
2336 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2337 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2338 				  sizeof(key->ipv6.dst))))
2339 		goto nla_put_failure;
2340 
2341 	if (key->basic.ip_proto == IPPROTO_TCP &&
2342 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2343 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2344 			     sizeof(key->tp.src)) ||
2345 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2346 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2347 			     sizeof(key->tp.dst)) ||
2348 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2349 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2350 			     sizeof(key->tcp.flags))))
2351 		goto nla_put_failure;
2352 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2353 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2354 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2355 				  sizeof(key->tp.src)) ||
2356 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2357 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2358 				  sizeof(key->tp.dst))))
2359 		goto nla_put_failure;
2360 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2361 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2362 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2363 				  sizeof(key->tp.src)) ||
2364 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2365 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2366 				  sizeof(key->tp.dst))))
2367 		goto nla_put_failure;
2368 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2369 		 key->basic.ip_proto == IPPROTO_ICMP &&
2370 		 (fl_dump_key_val(skb, &key->icmp.type,
2371 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2372 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2373 				  sizeof(key->icmp.type)) ||
2374 		  fl_dump_key_val(skb, &key->icmp.code,
2375 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2376 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2377 				  sizeof(key->icmp.code))))
2378 		goto nla_put_failure;
2379 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2380 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2381 		 (fl_dump_key_val(skb, &key->icmp.type,
2382 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2383 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2384 				  sizeof(key->icmp.type)) ||
2385 		  fl_dump_key_val(skb, &key->icmp.code,
2386 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2387 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2388 				  sizeof(key->icmp.code))))
2389 		goto nla_put_failure;
2390 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2391 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2392 		 (fl_dump_key_val(skb, &key->arp.sip,
2393 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2394 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2395 				  sizeof(key->arp.sip)) ||
2396 		  fl_dump_key_val(skb, &key->arp.tip,
2397 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2398 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2399 				  sizeof(key->arp.tip)) ||
2400 		  fl_dump_key_val(skb, &key->arp.op,
2401 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2402 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2403 				  sizeof(key->arp.op)) ||
2404 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2405 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2406 				  sizeof(key->arp.sha)) ||
2407 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2408 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2409 				  sizeof(key->arp.tha))))
2410 		goto nla_put_failure;
2411 
2412 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2413 	     key->basic.ip_proto == IPPROTO_UDP ||
2414 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2415 	     fl_dump_key_port_range(skb, key, mask))
2416 		goto nla_put_failure;
2417 
2418 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2419 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2420 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2421 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2422 			    sizeof(key->enc_ipv4.src)) ||
2423 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2424 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2425 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2426 			     sizeof(key->enc_ipv4.dst))))
2427 		goto nla_put_failure;
2428 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2429 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2430 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2431 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2432 			    sizeof(key->enc_ipv6.src)) ||
2433 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2434 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2435 				 &mask->enc_ipv6.dst,
2436 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2437 			    sizeof(key->enc_ipv6.dst))))
2438 		goto nla_put_failure;
2439 
2440 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2441 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2442 			    sizeof(key->enc_key_id)) ||
2443 	    fl_dump_key_val(skb, &key->enc_tp.src,
2444 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2445 			    &mask->enc_tp.src,
2446 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2447 			    sizeof(key->enc_tp.src)) ||
2448 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2449 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2450 			    &mask->enc_tp.dst,
2451 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2452 			    sizeof(key->enc_tp.dst)) ||
2453 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2454 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2455 		goto nla_put_failure;
2456 
2457 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2458 		goto nla_put_failure;
2459 
2460 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2461 		goto nla_put_failure;
2462 
2463 	return 0;
2464 
2465 nla_put_failure:
2466 	return -EMSGSIZE;
2467 }
2468 
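/* Dump one filter to user space. classid, key/mask and flags are read under
 * tp->lock to keep them consistent with concurrent updates; hardware stats
 * are refreshed before the action stats are dumped unless the filter is
 * skip_hw.
 */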
2469 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2470 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2471 {
2472 	struct cls_fl_filter *f = fh;
2473 	struct nlattr *nest;
2474 	struct fl_flow_key *key, *mask;
2475 	bool skip_hw;
2476 
2477 	if (!f)
2478 		return skb->len;
2479 
2480 	t->tcm_handle = f->handle;
2481 
2482 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2483 	if (!nest)
2484 		goto nla_put_failure;
2485 
2486 	spin_lock(&tp->lock);
2487 
2488 	if (f->res.classid &&
2489 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2490 		goto nla_put_failure_locked;
2491 
2492 	key = &f->key;
2493 	mask = &f->mask->key;
2494 	skip_hw = tc_skip_hw(f->flags);
2495 
2496 	if (fl_dump_key(skb, net, key, mask))
2497 		goto nla_put_failure_locked;
2498 
2499 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2500 		goto nla_put_failure_locked;
2501 
2502 	spin_unlock(&tp->lock);
2503 
2504 	if (!skip_hw)
2505 		fl_hw_update_stats(tp, f, rtnl_held);
2506 
2507 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2508 		goto nla_put_failure;
2509 
2510 	if (tcf_exts_dump(skb, &f->exts))
2511 		goto nla_put_failure;
2512 
2513 	nla_nest_end(skb, nest);
2514 
2515 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2516 		goto nla_put_failure;
2517 
2518 	return skb->len;
2519 
2520 nla_put_failure_locked:
2521 	spin_unlock(&tp->lock);
2522 nla_put_failure:
2523 	nla_nest_cancel(skb, nest);
2524 	return -1;
2525 }
2526 
2527 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2528 {
2529 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2530 	struct fl_flow_key *key, *mask;
2531 	struct nlattr *nest;
2532 
2533 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2534 	if (!nest)
2535 		goto nla_put_failure;
2536 
2537 	key = &tmplt->dummy_key;
2538 	mask = &tmplt->mask;
2539 
2540 	if (fl_dump_key(skb, net, key, mask))
2541 		goto nla_put_failure;
2542 
2543 	nla_nest_end(skb, nest);
2544 
2545 	return skb->len;
2546 
2547 nla_put_failure:
2548 	nla_nest_cancel(skb, nest);
2549 	return -EMSGSIZE;
2550 }
2551 
2552 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
2553 			  unsigned long base)
2554 {
2555 	struct cls_fl_filter *f = fh;
2556 
2557 	if (f && f->res.classid == classid) {
2558 		if (cl)
2559 			__tcf_bind_filter(q, &f->res, base);
2560 		else
2561 			__tcf_unbind_filter(q, &f->res);
2562 	}
2563 }
2564 
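/* Report whether the classifier holds no filters. If the handle IDR is empty,
 * tp is marked as deleting under tp->lock so that a concurrent fl_change()
 * fails with -EAGAIN instead of inserting into a proto that is about to be
 * destroyed.
 */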
2565 static bool fl_delete_empty(struct tcf_proto *tp)
2566 {
2567 	struct cls_fl_head *head = fl_head_dereference(tp);
2568 
2569 	spin_lock(&tp->lock);
2570 	tp->deleting = idr_is_empty(&head->handle_idr);
2571 	spin_unlock(&tp->lock);
2572 
2573 	return tp->deleting;
2574 }
2575 
2576 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2577 	.kind		= "flower",
2578 	.classify	= fl_classify,
2579 	.init		= fl_init,
2580 	.destroy	= fl_destroy,
2581 	.get		= fl_get,
2582 	.put		= fl_put,
2583 	.change		= fl_change,
2584 	.delete		= fl_delete,
2585 	.delete_empty	= fl_delete_empty,
2586 	.walk		= fl_walk,
2587 	.reoffload	= fl_reoffload,
2588 	.hw_add		= fl_hw_add,
2589 	.hw_del		= fl_hw_del,
2590 	.dump		= fl_dump,
2591 	.bind_class	= fl_bind_class,
2592 	.tmplt_create	= fl_tmplt_create,
2593 	.tmplt_destroy	= fl_tmplt_destroy,
2594 	.tmplt_dump	= fl_tmplt_dump,
2595 	.owner		= THIS_MODULE,
2596 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2597 };
2598 
2599 static int __init cls_fl_init(void)
2600 {
2601 	return register_tcf_proto_ops(&cls_fl_ops);
2602 }
2603 
2604 static void __exit cls_fl_exit(void)
2605 {
2606 	unregister_tcf_proto_ops(&cls_fl_ops);
2607 }
2608 
2609 module_init(cls_fl_init);
2610 module_exit(cls_fl_exit);
2611 
2612 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2613 MODULE_DESCRIPTION("Flower classifier");
2614 MODULE_LICENSE("GPL v2");
2615