// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
 * the action's index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err < 0)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

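/* Rebuild the 64-bit miss cookie handed to drivers from the xarray-allocated
 * base and the action's index in the exts action array. A zero base means no
 * cookie was allocated, so 0 ("no cookie") is returned.
 */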
static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

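/* Hash a classifier instance by (chain index, priority, protocol) for the
 * block's proto_destroy hashtable.
 */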
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

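/* Check whether the classifier kind supports rtnl-unlocked operation
 * (TCF_PROTO_OPS_DOIT_UNLOCKED).
 */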
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	tp->usesw = !tp->ops->reoffload;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

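/* Track how many classifiers on the block still need the software datapath.
 * The tcf_sw_enabled_key static branch follows the block's useswcnt
 * 0 <-> 1 transitions.
 */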
static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_block *block = tp->chain->block;
	bool counted = false;

	if (!add) {
		if (tp->usesw && tp->counted) {
			if (!atomic_dec_return(&block->useswcnt))
				static_branch_dec(&tcf_sw_enabled_key);
			tp->counted = false;
		}
		return;
	}

	spin_lock(&tp->lock);
	if (tp->usesw && !tp->counted) {
		counted = true;
		tp->counted = true;
	}
	spin_unlock(&tp->lock);

	if (counted && atomic_inc_return(&block->useswcnt) == 1)
		static_branch_inc(&tcf_sw_enabled_key);
#endif
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_proto_count_usesw(tp, false);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

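/* Look up a chain by index, optionally creating it. 'by_act' marks the new
 * reference as held by an action rather than by a filter or user request.
 */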
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

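/* Unlink all classifiers from the chain under filter_chain_lock, signalling
 * their destruction first, then drop the references outside the lock.
 */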
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

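/* Cleanup handler invoked by the indirect block infrastructure when an
 * offloading driver goes away: replay a FLOW_BLOCK_UNBIND for the driver's
 * callback so the block's offload state stays consistent.
 */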
static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

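/* Bind the block to the device's offload path. -EOPNOTSUPP from the driver
 * is not fatal: the device is counted in nooffloaddevcnt instead, unless the
 * block already has offloaded filters.
 */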
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time an skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time an skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block so that the rules
		 * update path of the cls API can run without the rtnl lock.
		 * The caller must release the block when finished with it.
		 * The 'if' branch of this conditional obtains its reference
		 * by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

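/* Drop a reference to the block and, on the last one, tear it down: flush
 * all chains, unbind any offload and free the block once the chain list is
 * empty.
 */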
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

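/* Attach a qdisc to a block: look up or create the block, register the qdisc
 * as an owner, install the chain0 head-change callback and bind the block to
 * the device's offload path. Unwound in reverse order on error.
 */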
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
	tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct net_device *dev = qdisc_dev(q);

	if (!block)
		return;
	if (tcf_block_tracks_dev(block, ei))
		xa_erase(&block->ports, dev->ifindex);
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

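/* Replay all filters currently installed on the block to a callback that is
 * being (un)registered, so that hardware state matches software state. On a
 * failed 'add' replay, everything already offloaded is removed again.
 */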
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (chain->tmplt_ops && add)
			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
							  cb_priv);
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
		if (chain->tmplt_ops && !add)
			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
							  cb_priv);
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

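/* Register a driver's callbacks on the block, replaying existing filters to
 * each one; on failure, unroll the callbacks that were already bound.
 */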
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

tcf_classify(struct sk_buff * skb,const struct tcf_block * block,const struct tcf_proto * tp,struct tcf_result * res,bool compat_mode)1805 int tcf_classify(struct sk_buff *skb,
1806 const struct tcf_block *block,
1807 const struct tcf_proto *tp,
1808 struct tcf_result *res, bool compat_mode)
1809 {
1810 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1811 u32 last_executed_chain = 0;
1812
1813 return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1814 &last_executed_chain);
1815 #else
1816 u32 last_executed_chain = tp ? tp->chain->index : 0;
1817 struct tcf_exts_miss_cookie_node *n = NULL;
1818 const struct tcf_proto *orig_tp = tp;
1819 struct tc_skb_ext *ext;
1820 int act_index = 0;
1821 int ret;
1822
1823 if (block) {
1824 ext = skb_ext_find(skb, TC_SKB_EXT);
1825
1826 if (ext && (ext->chain || ext->act_miss)) {
1827 struct tcf_chain *fchain;
1828 u32 chain;
1829
1830 if (ext->act_miss) {
1831 n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1832 &act_index);
1833 if (!n) {
1834 tcf_set_drop_reason(skb,
1835 SKB_DROP_REASON_TC_COOKIE_ERROR);
1836 return TC_ACT_SHOT;
1837 }
1838
1839 chain = n->chain_index;
1840 } else {
1841 chain = ext->chain;
1842 }
1843
1844 fchain = tcf_chain_lookup_rcu(block, chain);
1845 if (!fchain) {
1846 tcf_set_drop_reason(skb,
1847 SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1848
1849 return TC_ACT_SHOT;
1850 }
1851
1852 /* Consume, so cloned/redirect skbs won't inherit ext */
1853 skb_ext_del(skb, TC_SKB_EXT);
1854
1855 tp = rcu_dereference_bh(fchain->filter_chain);
1856 last_executed_chain = fchain->index;
1857 }
1858 }
1859
1860 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1861 &last_executed_chain);
1862
1863 if (tc_skb_ext_tc_enabled()) {
1864 /* If we missed on some chain */
1865 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1866 struct tc_skb_cb *cb = tc_skb_cb(skb);
1867
1868 ext = tc_skb_ext_alloc(skb);
1869 if (WARN_ON_ONCE(!ext)) {
1870 tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
1871 return TC_ACT_SHOT;
1872 }
1873 ext->chain = last_executed_chain;
1874 ext->mru = cb->mru;
1875 ext->post_ct = cb->post_ct;
1876 ext->post_ct_snat = cb->post_ct_snat;
1877 ext->post_ct_dnat = cb->post_ct_dnat;
1878 ext->zone = cb->zone;
1879 }
1880 }
1881
1882 return ret;
1883 #endif
1884 }
1885 EXPORT_SYMBOL(tcf_classify);
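/* The TC_SKB_EXT handling above works in both directions: on entry, an
 * extension left by a hardware miss (ext->chain or ext->act_miss_cookie)
 * selects the chain, or even the exact action within a filter, to resume
 * from in software; on exit, a miss on some chain re-attaches the
 * extension with last_executed_chain so a subsequent classification pass
 * can continue where this one stopped.
 */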
1886
1887 struct tcf_chain_info {
1888 struct tcf_proto __rcu **pprev;
1889 struct tcf_proto __rcu *next;
1890 };
1891
1892 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1893 struct tcf_chain_info *chain_info)
1894 {
1895 return tcf_chain_dereference(*chain_info->pprev, chain);
1896 }
1897
1898 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1899 struct tcf_chain_info *chain_info,
1900 struct tcf_proto *tp)
1901 {
1902 if (chain->flushing)
1903 return -EAGAIN;
1904
1905 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1906 if (*chain_info->pprev == chain->filter_chain)
1907 tcf_chain0_head_change(chain, tp);
1908 tcf_proto_get(tp);
1909 rcu_assign_pointer(*chain_info->pprev, tp);
1910
1911 return 0;
1912 }
1913
1914 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1915 struct tcf_chain_info *chain_info,
1916 struct tcf_proto *tp)
1917 {
1918 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1919
1920 tcf_proto_mark_delete(tp);
1921 if (tp == chain->filter_chain)
1922 tcf_chain0_head_change(chain, next);
1923 RCU_INIT_POINTER(*chain_info->pprev, next);
1924 }
1925
1926 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1927 struct tcf_chain_info *chain_info,
1928 u32 protocol, u32 prio,
1929 bool prio_allocate);
1930
1931 /* Try to insert a new proto.
1932 * If a proto with the specified priority already exists, free the
1933 * new proto and return the existing one.
1934 */
1935
1936 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1937 struct tcf_proto *tp_new,
1938 u32 protocol, u32 prio,
1939 bool rtnl_held)
1940 {
1941 struct tcf_chain_info chain_info;
1942 struct tcf_proto *tp;
1943 int err = 0;
1944
1945 mutex_lock(&chain->filter_chain_lock);
1946
1947 if (tcf_proto_exists_destroying(chain, tp_new)) {
1948 mutex_unlock(&chain->filter_chain_lock);
1949 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1950 return ERR_PTR(-EAGAIN);
1951 }
1952
1953 tp = tcf_chain_tp_find(chain, &chain_info,
1954 protocol, prio, false);
1955 if (!tp)
1956 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1957 mutex_unlock(&chain->filter_chain_lock);
1958
1959 if (tp) {
1960 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1961 tp_new = tp;
1962 } else if (err) {
1963 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1964 tp_new = ERR_PTR(err);
1965 }
1966
1967 return tp_new;
1968 }
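/* Usage sketch (mirroring the caller in tc_new_tfilter() below); the
 * returned pointer must be used instead of tp_new, which may already have
 * been freed by the time this returns:
 *
 *	tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, true);
 *	if (IS_ERR(tp))
 *		return PTR_ERR(tp);
 */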
1969
1970 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1971 struct tcf_proto *tp, bool rtnl_held,
1972 struct netlink_ext_ack *extack)
1973 {
1974 struct tcf_chain_info chain_info;
1975 struct tcf_proto *tp_iter;
1976 struct tcf_proto **pprev;
1977 struct tcf_proto *next;
1978
1979 mutex_lock(&chain->filter_chain_lock);
1980
1981 /* Atomically find and remove tp from chain. */
1982 for (pprev = &chain->filter_chain;
1983 (tp_iter = tcf_chain_dereference(*pprev, chain));
1984 pprev = &tp_iter->next) {
1985 if (tp_iter == tp) {
1986 chain_info.pprev = pprev;
1987 chain_info.next = tp_iter->next;
1988 WARN_ON(tp_iter->deleting);
1989 break;
1990 }
1991 }
1992 /* Verify that tp still exists and no new filters were inserted
1993 * concurrently.
1994 * Mark tp for deletion if it is empty.
1995 */
1996 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1997 mutex_unlock(&chain->filter_chain_lock);
1998 return;
1999 }
2000
2001 tcf_proto_signal_destroying(chain, tp);
2002 next = tcf_chain_dereference(chain_info.next, chain);
2003 if (tp == chain->filter_chain)
2004 tcf_chain0_head_change(chain, next);
2005 RCU_INIT_POINTER(*chain_info.pprev, next);
2006 mutex_unlock(&chain->filter_chain_lock);
2007
2008 tcf_proto_put(tp, rtnl_held, extack);
2009 }
2010
2011 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2012 struct tcf_chain_info *chain_info,
2013 u32 protocol, u32 prio,
2014 bool prio_allocate)
2015 {
2016 struct tcf_proto **pprev;
2017 struct tcf_proto *tp;
2018
2019 /* Check the chain for existence of proto-tcf with this priority */
2020 for (pprev = &chain->filter_chain;
2021 (tp = tcf_chain_dereference(*pprev, chain));
2022 pprev = &tp->next) {
2023 if (tp->prio >= prio) {
2024 if (tp->prio == prio) {
2025 if (prio_allocate ||
2026 (tp->protocol != protocol && protocol))
2027 return ERR_PTR(-EINVAL);
2028 } else {
2029 tp = NULL;
2030 }
2031 break;
2032 }
2033 }
2034 chain_info->pprev = pprev;
2035 if (tp) {
2036 chain_info->next = tp->next;
2037 tcf_proto_get(tp);
2038 } else {
2039 chain_info->next = NULL;
2040 }
2041 return tp;
2042 }
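/* Note: the scan above can stop at the first tp with tp->prio >= prio
 * because the chain is kept sorted by ascending priority;
 * tcf_chain_tp_insert() links a new tp exactly at the position found here.
 */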
2043
2044 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2045 struct tcf_proto *tp, struct tcf_block *block,
2046 struct Qdisc *q, u32 parent, void *fh,
2047 u32 portid, u32 seq, u16 flags, int event,
2048 bool terse_dump, bool rtnl_held,
2049 struct netlink_ext_ack *extack)
2050 {
2051 struct tcmsg *tcm;
2052 struct nlmsghdr *nlh;
2053 unsigned char *b = skb_tail_pointer(skb);
2054 int ret = -EMSGSIZE;
2055
2056 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2057 if (!nlh)
2058 goto out_nlmsg_trim;
2059 tcm = nlmsg_data(nlh);
2060 tcm->tcm_family = AF_UNSPEC;
2061 tcm->tcm__pad1 = 0;
2062 tcm->tcm__pad2 = 0;
2063 if (q) {
2064 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2065 tcm->tcm_parent = parent;
2066 } else {
2067 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2068 tcm->tcm_block_index = block->index;
2069 }
2070 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2071 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2072 goto nla_put_failure;
2073 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2074 goto nla_put_failure;
2075 if (!fh) {
2076 tcm->tcm_handle = 0;
2077 } else if (terse_dump) {
2078 if (tp->ops->terse_dump) {
2079 if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2080 rtnl_held) < 0)
2081 goto nla_put_failure;
2082 } else {
2083 goto cls_op_not_supp;
2084 }
2085 } else {
2086 if (tp->ops->dump &&
2087 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2088 goto nla_put_failure;
2089 }
2090
2091 if (extack && extack->_msg &&
2092 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2093 goto nla_put_failure;
2094
2095 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2096
2097 return skb->len;
2098
2099 cls_op_not_supp:
2100 ret = -EOPNOTSUPP;
2101 out_nlmsg_trim:
2102 nla_put_failure:
2103 nlmsg_trim(skb, b);
2104 return ret;
2105 }
2106
2107 static struct sk_buff *tfilter_notify_prep(struct net *net,
2108 struct sk_buff *oskb,
2109 struct nlmsghdr *n,
2110 struct tcf_proto *tp,
2111 struct tcf_block *block,
2112 struct Qdisc *q, u32 parent,
2113 void *fh, int event,
2114 u32 portid, bool rtnl_held,
2115 struct netlink_ext_ack *extack)
2116 {
2117 unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
2118 struct sk_buff *skb;
2119 int ret;
2120
2121 retry:
2122 skb = alloc_skb(size, GFP_KERNEL);
2123 if (!skb)
2124 return ERR_PTR(-ENOBUFS);
2125
2126 ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2127 n->nlmsg_seq, n->nlmsg_flags, event, false,
2128 rtnl_held, extack);
2129 if (ret <= 0) {
2130 kfree_skb(skb);
2131 if (ret == -EMSGSIZE) {
2132 size += NLMSG_GOODSIZE;
2133 goto retry;
2134 }
2135 return ERR_PTR(-EINVAL);
2136 }
2137 return skb;
2138 }
2139
2140 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2141 struct nlmsghdr *n, struct tcf_proto *tp,
2142 struct tcf_block *block, struct Qdisc *q,
2143 u32 parent, void *fh, int event, bool unicast,
2144 bool rtnl_held, struct netlink_ext_ack *extack)
2145 {
2146 struct sk_buff *skb;
2147 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2148 int err = 0;
2149
2150 if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2151 return 0;
2152
2153 skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
2154 portid, rtnl_held, extack);
2155 if (IS_ERR(skb))
2156 return PTR_ERR(skb);
2157
2158 if (unicast)
2159 err = rtnl_unicast(skb, net, portid);
2160 else
2161 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2162 n->nlmsg_flags & NLM_F_ECHO);
2163 return err;
2164 }
2165
2166 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2167 struct nlmsghdr *n, struct tcf_proto *tp,
2168 struct tcf_block *block, struct Qdisc *q,
2169 u32 parent, void *fh, bool *last, bool rtnl_held,
2170 struct netlink_ext_ack *extack)
2171 {
2172 struct sk_buff *skb;
2173 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2174 int err;
2175
2176 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2177 return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2178
2179 skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
2180 RTM_DELTFILTER, portid, rtnl_held, extack);
2181 if (IS_ERR(skb)) {
2182 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2183 return PTR_ERR(skb);
2184 }
2185
2186 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2187 if (err) {
2188 kfree_skb(skb);
2189 return err;
2190 }
2191
2192 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2193 n->nlmsg_flags & NLM_F_ECHO);
2194 if (err < 0)
2195 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2196
2197 return err;
2198 }
2199
2200 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2201 struct tcf_block *block, struct Qdisc *q,
2202 u32 parent, struct nlmsghdr *n,
2203 struct tcf_chain *chain, int event,
2204 struct netlink_ext_ack *extack)
2205 {
2206 struct tcf_proto *tp;
2207
2208 for (tp = tcf_get_next_proto(chain, NULL);
2209 tp; tp = tcf_get_next_proto(chain, tp))
2210 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2211 event, false, true, extack);
2212 }
2213
2214 static void tfilter_put(struct tcf_proto *tp, void *fh)
2215 {
2216 if (tp->ops->put && fh)
2217 tp->ops->put(tp, fh);
2218 }
2219
2220 static bool is_qdisc_ingress(__u32 classid)
2221 {
2222 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2223 }
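/* True when the filter's parent is the ingress side of the clsact/ingress
 * qdisc; used below to set TCA_ACT_FLAGS_AT_INGRESS when actions are loaded.
 */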
2224
2225 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2226 struct netlink_ext_ack *extack)
2227 {
2228 struct net *net = sock_net(skb->sk);
2229 struct nlattr *tca[TCA_MAX + 1];
2230 char name[IFNAMSIZ];
2231 struct tcmsg *t;
2232 u32 protocol;
2233 u32 prio;
2234 bool prio_allocate;
2235 u32 parent;
2236 u32 chain_index;
2237 struct Qdisc *q;
2238 struct tcf_chain_info chain_info;
2239 struct tcf_chain *chain;
2240 struct tcf_block *block;
2241 struct tcf_proto *tp;
2242 unsigned long cl;
2243 void *fh;
2244 int err;
2245 int tp_created;
2246 bool rtnl_held = false;
2247 u32 flags;
2248
2249 replay:
2250 tp_created = 0;
2251
2252 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2253 rtm_tca_policy, extack);
2254 if (err < 0)
2255 return err;
2256
2257 t = nlmsg_data(n);
2258 protocol = TC_H_MIN(t->tcm_info);
2259 prio = TC_H_MAJ(t->tcm_info);
2260 prio_allocate = false;
2261 parent = t->tcm_parent;
2262 tp = NULL;
2263 cl = 0;
2264 block = NULL;
2265 q = NULL;
2266 chain = NULL;
2267 flags = 0;
2268
2269 if (prio == 0) {
2270 /* If no priority is provided by the user,
2271 * we allocate one.
2272 */
2273 if (n->nlmsg_flags & NLM_F_CREATE) {
2274 prio = TC_H_MAKE(0x80000000U, 0U);
2275 prio_allocate = true;
2276 } else {
2277 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2278 return -ENOENT;
2279 }
2280 }
2281
2282 /* Find head of filter chain. */
2283
2284 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2285 if (err)
2286 return err;
2287
2288 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2289 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2290 err = -EINVAL;
2291 goto errout;
2292 }
2293
2294 /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2295 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2296 * classifier type is not specified, or the classifier is not unlocked.
2297 */
2298 if (rtnl_held ||
2299 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2300 !tcf_proto_is_unlocked(name)) {
2301 rtnl_held = true;
2302 rtnl_lock();
2303 }
2304
2305 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2306 if (err)
2307 goto errout;
2308
2309 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2310 extack);
2311 if (IS_ERR(block)) {
2312 err = PTR_ERR(block);
2313 goto errout;
2314 }
2315 block->classid = parent;
2316
2317 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2318 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2319 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2320 err = -EINVAL;
2321 goto errout;
2322 }
2323 chain = tcf_chain_get(block, chain_index, true);
2324 if (!chain) {
2325 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2326 err = -ENOMEM;
2327 goto errout;
2328 }
2329
2330 mutex_lock(&chain->filter_chain_lock);
2331 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2332 prio, prio_allocate);
2333 if (IS_ERR(tp)) {
2334 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2335 err = PTR_ERR(tp);
2336 goto errout_locked;
2337 }
2338
2339 if (tp == NULL) {
2340 struct tcf_proto *tp_new = NULL;
2341
2342 if (chain->flushing) {
2343 err = -EAGAIN;
2344 goto errout_locked;
2345 }
2346
2347 /* Proto-tcf does not exist, create new one */
2348
2349 if (tca[TCA_KIND] == NULL || !protocol) {
2350 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2351 err = -EINVAL;
2352 goto errout_locked;
2353 }
2354
2355 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2356 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2357 err = -ENOENT;
2358 goto errout_locked;
2359 }
2360
2361 if (prio_allocate)
2362 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2363 &chain_info));
2364
2365 mutex_unlock(&chain->filter_chain_lock);
2366 tp_new = tcf_proto_create(name, protocol, prio, chain,
2367 rtnl_held, extack);
2368 if (IS_ERR(tp_new)) {
2369 err = PTR_ERR(tp_new);
2370 goto errout_tp;
2371 }
2372
2373 tp_created = 1;
2374 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2375 rtnl_held);
2376 if (IS_ERR(tp)) {
2377 err = PTR_ERR(tp);
2378 goto errout_tp;
2379 }
2380 } else {
2381 mutex_unlock(&chain->filter_chain_lock);
2382 }
2383
2384 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2385 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2386 err = -EINVAL;
2387 goto errout;
2388 }
2389
2390 fh = tp->ops->get(tp, t->tcm_handle);
2391
2392 if (!fh) {
2393 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2394 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2395 err = -ENOENT;
2396 goto errout;
2397 }
2398 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2399 tfilter_put(tp, fh);
2400 NL_SET_ERR_MSG(extack, "Filter already exists");
2401 err = -EEXIST;
2402 goto errout;
2403 }
2404
2405 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2406 tfilter_put(tp, fh);
2407 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2408 err = -EINVAL;
2409 goto errout;
2410 }
2411
2412 if (!(n->nlmsg_flags & NLM_F_CREATE))
2413 flags |= TCA_ACT_FLAGS_REPLACE;
2414 if (!rtnl_held)
2415 flags |= TCA_ACT_FLAGS_NO_RTNL;
2416 if (is_qdisc_ingress(parent))
2417 flags |= TCA_ACT_FLAGS_AT_INGRESS;
2418 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2419 flags, extack);
2420 if (err == 0) {
2421 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2422 RTM_NEWTFILTER, false, rtnl_held, extack);
2423 tfilter_put(tp, fh);
2424 tcf_proto_count_usesw(tp, true);
2425 /* q pointer is NULL for shared blocks */
2426 if (q)
2427 q->flags &= ~TCQ_F_CAN_BYPASS;
2428 }
2429
2430 errout:
2431 if (err && tp_created)
2432 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2433 errout_tp:
2434 if (chain) {
2435 if (tp && !IS_ERR(tp))
2436 tcf_proto_put(tp, rtnl_held, NULL);
2437 if (!tp_created)
2438 tcf_chain_put(chain);
2439 }
2440 tcf_block_release(q, block, rtnl_held);
2441
2442 if (rtnl_held)
2443 rtnl_unlock();
2444
2445 if (err == -EAGAIN) {
2446 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2447 * of target chain.
2448 */
2449 rtnl_held = true;
2450 /* Replay the request. */
2451 goto replay;
2452 }
2453 return err;
2454
2455 errout_locked:
2456 mutex_unlock(&chain->filter_chain_lock);
2457 goto errout;
2458 }
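/* For orientation (illustrative command): "tc filter add dev eth0 ingress
 * prio 10 protocol ip flower ..." arrives here as RTM_NEWTFILTER with
 * NLM_F_CREATE | NLM_F_EXCL, "tc filter replace" sends only NLM_F_CREATE,
 * and "tc filter change" sends neither; those are the NLM_F_CREATE and
 * NLM_F_EXCL checks the function above distinguishes.
 */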
2459
2460 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2461 struct netlink_ext_ack *extack)
2462 {
2463 struct net *net = sock_net(skb->sk);
2464 struct nlattr *tca[TCA_MAX + 1];
2465 char name[IFNAMSIZ];
2466 struct tcmsg *t;
2467 u32 protocol;
2468 u32 prio;
2469 u32 parent;
2470 u32 chain_index;
2471 struct Qdisc *q = NULL;
2472 struct tcf_chain_info chain_info;
2473 struct tcf_chain *chain = NULL;
2474 struct tcf_block *block = NULL;
2475 struct tcf_proto *tp = NULL;
2476 unsigned long cl = 0;
2477 void *fh = NULL;
2478 int err;
2479 bool rtnl_held = false;
2480
2481 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2482 rtm_tca_policy, extack);
2483 if (err < 0)
2484 return err;
2485
2486 t = nlmsg_data(n);
2487 protocol = TC_H_MIN(t->tcm_info);
2488 prio = TC_H_MAJ(t->tcm_info);
2489 parent = t->tcm_parent;
2490
2491 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2492 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2493 return -ENOENT;
2494 }
2495
2496 /* Find head of filter chain. */
2497
2498 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2499 if (err)
2500 return err;
2501
2502 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2503 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2504 err = -EINVAL;
2505 goto errout;
2506 }
2507 /* Take the rtnl mutex if flushing a whole chain, the block is shared
2508 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2509 * not specified, or the classifier is not unlocked.
2510 */
2511 if (!prio ||
2512 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2513 !tcf_proto_is_unlocked(name)) {
2514 rtnl_held = true;
2515 rtnl_lock();
2516 }
2517
2518 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2519 if (err)
2520 goto errout;
2521
2522 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2523 extack);
2524 if (IS_ERR(block)) {
2525 err = PTR_ERR(block);
2526 goto errout;
2527 }
2528
2529 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2530 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2531 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2532 err = -EINVAL;
2533 goto errout;
2534 }
2535 chain = tcf_chain_get(block, chain_index, false);
2536 if (!chain) {
2537 /* User requested flush on non-existent chain. Nothing to do,
2538 * so just return success.
2539 */
2540 if (prio == 0) {
2541 err = 0;
2542 goto errout;
2543 }
2544 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2545 err = -ENOENT;
2546 goto errout;
2547 }
2548
2549 if (prio == 0) {
2550 tfilter_notify_chain(net, skb, block, q, parent, n,
2551 chain, RTM_DELTFILTER, extack);
2552 tcf_chain_flush(chain, rtnl_held);
2553 err = 0;
2554 goto errout;
2555 }
2556
2557 mutex_lock(&chain->filter_chain_lock);
2558 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2559 prio, false);
2560 if (!tp || IS_ERR(tp)) {
2561 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2562 err = tp ? PTR_ERR(tp) : -ENOENT;
2563 goto errout_locked;
2564 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2565 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2566 err = -EINVAL;
2567 goto errout_locked;
2568 } else if (t->tcm_handle == 0) {
2569 tcf_proto_signal_destroying(chain, tp);
2570 tcf_chain_tp_remove(chain, &chain_info, tp);
2571 mutex_unlock(&chain->filter_chain_lock);
2572
2573 tcf_proto_put(tp, rtnl_held, NULL);
2574 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2575 RTM_DELTFILTER, false, rtnl_held, extack);
2576 err = 0;
2577 goto errout;
2578 }
2579 mutex_unlock(&chain->filter_chain_lock);
2580
2581 fh = tp->ops->get(tp, t->tcm_handle);
2582
2583 if (!fh) {
2584 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2585 err = -ENOENT;
2586 } else {
2587 bool last;
2588
2589 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2590 &last, rtnl_held, extack);
2591
2592 if (err)
2593 goto errout;
2594 if (last)
2595 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2596 }
2597
2598 errout:
2599 if (chain) {
2600 if (tp && !IS_ERR(tp))
2601 tcf_proto_put(tp, rtnl_held, NULL);
2602 tcf_chain_put(chain);
2603 }
2604 tcf_block_release(q, block, rtnl_held);
2605
2606 if (rtnl_held)
2607 rtnl_unlock();
2608
2609 return err;
2610
2611 errout_locked:
2612 mutex_unlock(&chain->filter_chain_lock);
2613 goto errout;
2614 }
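/* Note the prio == 0 handling above: a delete request without a priority
 * flushes the whole chain (chain 0 unless TCA_CHAIN is given) and is
 * rejected earlier if protocol, handle or kind are set alongside it.
 */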
2615
2616 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2617 struct netlink_ext_ack *extack)
2618 {
2619 struct net *net = sock_net(skb->sk);
2620 struct nlattr *tca[TCA_MAX + 1];
2621 char name[IFNAMSIZ];
2622 struct tcmsg *t;
2623 u32 protocol;
2624 u32 prio;
2625 u32 parent;
2626 u32 chain_index;
2627 struct Qdisc *q = NULL;
2628 struct tcf_chain_info chain_info;
2629 struct tcf_chain *chain = NULL;
2630 struct tcf_block *block = NULL;
2631 struct tcf_proto *tp = NULL;
2632 unsigned long cl = 0;
2633 void *fh = NULL;
2634 int err;
2635 bool rtnl_held = false;
2636
2637 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2638 rtm_tca_policy, extack);
2639 if (err < 0)
2640 return err;
2641
2642 t = nlmsg_data(n);
2643 protocol = TC_H_MIN(t->tcm_info);
2644 prio = TC_H_MAJ(t->tcm_info);
2645 parent = t->tcm_parent;
2646
2647 if (prio == 0) {
2648 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2649 return -ENOENT;
2650 }
2651
2652 /* Find head of filter chain. */
2653
2654 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2655 if (err)
2656 return err;
2657
2658 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2659 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2660 err = -EINVAL;
2661 goto errout;
2662 }
2663 /* Take the rtnl mutex if the block is shared (no qdisc found), the
2664 * qdisc is not unlocked, the classifier type is not specified, or the
2665 * classifier is not unlocked.
2666 */
2667 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2668 !tcf_proto_is_unlocked(name)) {
2669 rtnl_held = true;
2670 rtnl_lock();
2671 }
2672
2673 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2674 if (err)
2675 goto errout;
2676
2677 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2678 extack);
2679 if (IS_ERR(block)) {
2680 err = PTR_ERR(block);
2681 goto errout;
2682 }
2683
2684 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2685 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2686 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2687 err = -EINVAL;
2688 goto errout;
2689 }
2690 chain = tcf_chain_get(block, chain_index, false);
2691 if (!chain) {
2692 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2693 err = -EINVAL;
2694 goto errout;
2695 }
2696
2697 mutex_lock(&chain->filter_chain_lock);
2698 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2699 prio, false);
2700 mutex_unlock(&chain->filter_chain_lock);
2701 if (!tp || IS_ERR(tp)) {
2702 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2703 err = tp ? PTR_ERR(tp) : -ENOENT;
2704 goto errout;
2705 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2706 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2707 err = -EINVAL;
2708 goto errout;
2709 }
2710
2711 fh = tp->ops->get(tp, t->tcm_handle);
2712
2713 if (!fh) {
2714 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2715 err = -ENOENT;
2716 } else {
2717 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2718 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2719 if (err < 0)
2720 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2721 }
2722
2723 tfilter_put(tp, fh);
2724 errout:
2725 if (chain) {
2726 if (tp && !IS_ERR(tp))
2727 tcf_proto_put(tp, rtnl_held, NULL);
2728 tcf_chain_put(chain);
2729 }
2730 tcf_block_release(q, block, rtnl_held);
2731
2732 if (rtnl_held)
2733 rtnl_unlock();
2734
2735 return err;
2736 }
2737
2738 struct tcf_dump_args {
2739 struct tcf_walker w;
2740 struct sk_buff *skb;
2741 struct netlink_callback *cb;
2742 struct tcf_block *block;
2743 struct Qdisc *q;
2744 u32 parent;
2745 bool terse_dump;
2746 };
2747
2748 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2749 {
2750 struct tcf_dump_args *a = (void *)arg;
2751 struct net *net = sock_net(a->skb->sk);
2752
2753 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2754 n, NETLINK_CB(a->cb->skb).portid,
2755 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2756 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2757 }
2758
2759 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2760 struct sk_buff *skb, struct netlink_callback *cb,
2761 long index_start, long *p_index, bool terse)
2762 {
2763 struct net *net = sock_net(skb->sk);
2764 struct tcf_block *block = chain->block;
2765 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2766 struct tcf_proto *tp, *tp_prev;
2767 struct tcf_dump_args arg;
2768
2769 for (tp = __tcf_get_next_proto(chain, NULL);
2770 tp;
2771 tp_prev = tp,
2772 tp = __tcf_get_next_proto(chain, tp),
2773 tcf_proto_put(tp_prev, true, NULL),
2774 (*p_index)++) {
2775 if (*p_index < index_start)
2776 continue;
2777 if (TC_H_MAJ(tcm->tcm_info) &&
2778 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2779 continue;
2780 if (TC_H_MIN(tcm->tcm_info) &&
2781 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2782 continue;
2783 if (*p_index > index_start)
2784 memset(&cb->args[1], 0,
2785 sizeof(cb->args) - sizeof(cb->args[0]));
2786 if (cb->args[1] == 0) {
2787 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2788 NETLINK_CB(cb->skb).portid,
2789 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2790 RTM_NEWTFILTER, false, true, NULL) <= 0)
2791 goto errout;
2792 cb->args[1] = 1;
2793 }
2794 if (!tp->ops->walk)
2795 continue;
2796 arg.w.fn = tcf_node_dump;
2797 arg.skb = skb;
2798 arg.cb = cb;
2799 arg.block = block;
2800 arg.q = q;
2801 arg.parent = parent;
2802 arg.w.stop = 0;
2803 arg.w.skip = cb->args[1] - 1;
2804 arg.w.count = 0;
2805 arg.w.cookie = cb->args[2];
2806 arg.terse_dump = terse;
2807 tp->ops->walk(tp, &arg.w, true);
2808 cb->args[2] = arg.w.cookie;
2809 cb->args[1] = arg.w.count + 1;
2810 if (arg.w.stop)
2811 goto errout;
2812 }
2813 return true;
2814
2815 errout:
2816 tcf_proto_put(tp, true, NULL);
2817 return false;
2818 }
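/* Dump resume state lives in cb->args: args[0] holds the global filter
 * index (stored by tc_dump_tfilter() below), args[1] is the per-tp walker
 * position plus one (0 means the tp header was not dumped yet), and
 * args[2] carries the classifier's opaque walk cookie.
 */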
2819
2820 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2821 [TCA_CHAIN] = { .type = NLA_U32 },
2822 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2823 };
2824
2825 /* called with RTNL */
2826 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2827 {
2828 struct tcf_chain *chain, *chain_prev;
2829 struct net *net = sock_net(skb->sk);
2830 struct nlattr *tca[TCA_MAX + 1];
2831 struct Qdisc *q = NULL;
2832 struct tcf_block *block;
2833 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2834 bool terse_dump = false;
2835 long index_start;
2836 long index;
2837 u32 parent;
2838 int err;
2839
2840 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2841 return skb->len;
2842
2843 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2844 tcf_tfilter_dump_policy, cb->extack);
2845 if (err)
2846 return err;
2847
2848 if (tca[TCA_DUMP_FLAGS]) {
2849 struct nla_bitfield32 flags =
2850 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2851
2852 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2853 }
2854
2855 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2856 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2857 if (!block)
2858 goto out;
2859 /* If we work with block index, q is NULL and parent value
2860 * will never be used in the following code. The check
2861 * in tcf_fill_node prevents it. However, the compiler does not
2862 * see that far, so set parent to zero to silence the warning
2863 * about parent being uninitialized.
2864 */
2865 parent = 0;
2866 } else {
2867 const struct Qdisc_class_ops *cops;
2868 struct net_device *dev;
2869 unsigned long cl = 0;
2870
2871 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2872 if (!dev)
2873 return skb->len;
2874
2875 parent = tcm->tcm_parent;
2876 if (!parent)
2877 q = rtnl_dereference(dev->qdisc);
2878 else
2879 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2880 if (!q)
2881 goto out;
2882 cops = q->ops->cl_ops;
2883 if (!cops)
2884 goto out;
2885 if (!cops->tcf_block)
2886 goto out;
2887 if (TC_H_MIN(tcm->tcm_parent)) {
2888 cl = cops->find(q, tcm->tcm_parent);
2889 if (cl == 0)
2890 goto out;
2891 }
2892 block = cops->tcf_block(q, cl, NULL);
2893 if (!block)
2894 goto out;
2895 parent = block->classid;
2896 if (tcf_block_shared(block))
2897 q = NULL;
2898 }
2899
2900 index_start = cb->args[0];
2901 index = 0;
2902
2903 for (chain = __tcf_get_next_chain(block, NULL);
2904 chain;
2905 chain_prev = chain,
2906 chain = __tcf_get_next_chain(block, chain),
2907 tcf_chain_put(chain_prev)) {
2908 if (tca[TCA_CHAIN] &&
2909 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2910 continue;
2911 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2912 index_start, &index, terse_dump)) {
2913 tcf_chain_put(chain);
2914 err = -EMSGSIZE;
2915 break;
2916 }
2917 }
2918
2919 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2920 tcf_block_refcnt_put(block, true);
2921 cb->args[0] = index;
2922
2923 out:
2924 /* If we made no progress, the error (EMSGSIZE) is real */
2925 if (skb->len == 0 && err)
2926 return err;
2927 return skb->len;
2928 }
2929
2930 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2931 void *tmplt_priv, u32 chain_index,
2932 struct net *net, struct sk_buff *skb,
2933 struct tcf_block *block,
2934 u32 portid, u32 seq, u16 flags, int event,
2935 struct netlink_ext_ack *extack)
2936 {
2937 unsigned char *b = skb_tail_pointer(skb);
2938 const struct tcf_proto_ops *ops;
2939 struct nlmsghdr *nlh;
2940 struct tcmsg *tcm;
2941 void *priv;
2942
2943 ops = tmplt_ops;
2944 priv = tmplt_priv;
2945
2946 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2947 if (!nlh)
2948 goto out_nlmsg_trim;
2949 tcm = nlmsg_data(nlh);
2950 tcm->tcm_family = AF_UNSPEC;
2951 tcm->tcm__pad1 = 0;
2952 tcm->tcm__pad2 = 0;
2953 tcm->tcm_handle = 0;
2954 if (block->q) {
2955 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2956 tcm->tcm_parent = block->q->handle;
2957 } else {
2958 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2959 tcm->tcm_block_index = block->index;
2960 }
2961
2962 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2963 goto nla_put_failure;
2964
2965 if (ops) {
2966 if (nla_put_string(skb, TCA_KIND, ops->kind))
2967 goto nla_put_failure;
2968 if (ops->tmplt_dump(skb, net, priv) < 0)
2969 goto nla_put_failure;
2970 }
2971
2972 if (extack && extack->_msg &&
2973 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2974 goto out_nlmsg_trim;
2975
2976 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2977
2978 return skb->len;
2979
2980 out_nlmsg_trim:
2981 nla_put_failure:
2982 nlmsg_trim(skb, b);
2983 return -EMSGSIZE;
2984 }
2985
2986 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2987 u32 seq, u16 flags, int event, bool unicast,
2988 struct netlink_ext_ack *extack)
2989 {
2990 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2991 struct tcf_block *block = chain->block;
2992 struct net *net = block->net;
2993 struct sk_buff *skb;
2994 int err = 0;
2995
2996 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2997 return 0;
2998
2999 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3000 if (!skb)
3001 return -ENOBUFS;
3002
3003 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3004 chain->index, net, skb, block, portid,
3005 seq, flags, event, extack) <= 0) {
3006 kfree_skb(skb);
3007 return -EINVAL;
3008 }
3009
3010 if (unicast)
3011 err = rtnl_unicast(skb, net, portid);
3012 else
3013 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
3014 flags & NLM_F_ECHO);
3015
3016 return err;
3017 }
3018
3019 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3020 void *tmplt_priv, u32 chain_index,
3021 struct tcf_block *block, struct sk_buff *oskb,
3022 u32 seq, u16 flags)
3023 {
3024 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3025 struct net *net = block->net;
3026 struct sk_buff *skb;
3027
3028 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3029 return 0;
3030
3031 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3032 if (!skb)
3033 return -ENOBUFS;
3034
3035 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3036 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3037 kfree_skb(skb);
3038 return -EINVAL;
3039 }
3040
3041 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3042 }
3043
3044 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3045 struct nlattr **tca,
3046 struct netlink_ext_ack *extack)
3047 {
3048 const struct tcf_proto_ops *ops;
3049 char name[IFNAMSIZ];
3050 void *tmplt_priv;
3051
3052 /* If kind is not set, user did not specify template. */
3053 if (!tca[TCA_KIND])
3054 return 0;
3055
3056 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3057 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3058 return -EINVAL;
3059 }
3060
3061 ops = tcf_proto_lookup_ops(name, true, extack);
3062 if (IS_ERR(ops))
3063 return PTR_ERR(ops);
3064 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3065 !ops->tmplt_reoffload) {
3066 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3067 module_put(ops->owner);
3068 return -EOPNOTSUPP;
3069 }
3070
3071 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3072 if (IS_ERR(tmplt_priv)) {
3073 module_put(ops->owner);
3074 return PTR_ERR(tmplt_priv);
3075 }
3076 chain->tmplt_ops = ops;
3077 chain->tmplt_priv = tmplt_priv;
3078 return 0;
3079 }
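/* Templates are created by adding a chain with a classifier kind attached,
 * e.g. (illustrative values):
 *
 *	tc chain add dev eth0 ingress protocol ip chain 1 \
 *		flower dst_mac 00:11:22:33:44:55/ff:ff:ff:ff:ff:00
 *
 * after which filters added to chain 1 must stay within the template mask
 * (enforced by the classifier, e.g. by flower).
 */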
3080
3081 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3082 void *tmplt_priv)
3083 {
3084 /* If template ops are not set, there is no work to do for us. */
3085 if (!tmplt_ops)
3086 return;
3087
3088 tmplt_ops->tmplt_destroy(tmplt_priv);
3089 module_put(tmplt_ops->owner);
3090 }
3091
3092 /* Add/delete/get a chain */
3093
3094 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3095 struct netlink_ext_ack *extack)
3096 {
3097 struct net *net = sock_net(skb->sk);
3098 struct nlattr *tca[TCA_MAX + 1];
3099 struct tcmsg *t;
3100 u32 parent;
3101 u32 chain_index;
3102 struct Qdisc *q;
3103 struct tcf_chain *chain;
3104 struct tcf_block *block;
3105 unsigned long cl;
3106 int err;
3107
3108 replay:
3109 q = NULL;
3110 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3111 rtm_tca_policy, extack);
3112 if (err < 0)
3113 return err;
3114
3115 t = nlmsg_data(n);
3116 parent = t->tcm_parent;
3117 cl = 0;
3118
3119 block = tcf_block_find(net, &q, &parent, &cl,
3120 t->tcm_ifindex, t->tcm_block_index, extack);
3121 if (IS_ERR(block))
3122 return PTR_ERR(block);
3123
3124 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3125 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3126 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3127 err = -EINVAL;
3128 goto errout_block;
3129 }
3130
3131 mutex_lock(&block->lock);
3132 chain = tcf_chain_lookup(block, chain_index);
3133 if (n->nlmsg_type == RTM_NEWCHAIN) {
3134 if (chain) {
3135 if (tcf_chain_held_by_acts_only(chain)) {
3136 /* The chain exists only because there is
3137 * some action referencing it.
3138 */
3139 tcf_chain_hold(chain);
3140 } else {
3141 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3142 err = -EEXIST;
3143 goto errout_block_locked;
3144 }
3145 } else {
3146 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3147 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3148 err = -ENOENT;
3149 goto errout_block_locked;
3150 }
3151 chain = tcf_chain_create(block, chain_index);
3152 if (!chain) {
3153 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3154 err = -ENOMEM;
3155 goto errout_block_locked;
3156 }
3157 }
3158 } else {
3159 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3160 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3161 err = -EINVAL;
3162 goto errout_block_locked;
3163 }
3164 tcf_chain_hold(chain);
3165 }
3166
3167 if (n->nlmsg_type == RTM_NEWCHAIN) {
3168 /* Modifying chain requires holding parent block lock. In case
3169 * the chain was successfully added, take a reference to the
3170 * chain. This ensures that an empty chain does not disappear at
3171 * the end of this function.
3172 */
3173 tcf_chain_hold(chain);
3174 chain->explicitly_created = true;
3175 }
3176 mutex_unlock(&block->lock);
3177
3178 switch (n->nlmsg_type) {
3179 case RTM_NEWCHAIN:
3180 err = tc_chain_tmplt_add(chain, net, tca, extack);
3181 if (err) {
3182 tcf_chain_put_explicitly_created(chain);
3183 goto errout;
3184 }
3185
3186 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3187 RTM_NEWCHAIN, false, extack);
3188 break;
3189 case RTM_DELCHAIN:
3190 tfilter_notify_chain(net, skb, block, q, parent, n,
3191 chain, RTM_DELTFILTER, extack);
3192 /* Flush the chain first as the user requested chain removal. */
3193 tcf_chain_flush(chain, true);
3194 /* In case the chain was successfully deleted, put a reference
3195 * to the chain previously taken during addition.
3196 */
3197 tcf_chain_put_explicitly_created(chain);
3198 break;
3199 case RTM_GETCHAIN:
3200 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3201 n->nlmsg_flags, n->nlmsg_type, true, extack);
3202 if (err < 0)
3203 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3204 break;
3205 default:
3206 err = -EOPNOTSUPP;
3207 NL_SET_ERR_MSG(extack, "Unsupported message type");
3208 goto errout;
3209 }
3210
3211 errout:
3212 tcf_chain_put(chain);
3213 errout_block:
3214 tcf_block_release(q, block, true);
3215 if (err == -EAGAIN)
3216 /* Replay the request. */
3217 goto replay;
3218 return err;
3219
3220 errout_block_locked:
3221 mutex_unlock(&block->lock);
3222 goto errout_block;
3223 }
3224
3225 /* called with RTNL */
3226 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3227 {
3228 struct net *net = sock_net(skb->sk);
3229 struct nlattr *tca[TCA_MAX + 1];
3230 struct Qdisc *q = NULL;
3231 struct tcf_block *block;
3232 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3233 struct tcf_chain *chain;
3234 long index_start;
3235 long index;
3236 int err;
3237
3238 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3239 return skb->len;
3240
3241 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3242 rtm_tca_policy, cb->extack);
3243 if (err)
3244 return err;
3245
3246 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3247 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3248 if (!block)
3249 goto out;
3250 } else {
3251 const struct Qdisc_class_ops *cops;
3252 struct net_device *dev;
3253 unsigned long cl = 0;
3254
3255 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3256 if (!dev)
3257 return skb->len;
3258
3259 if (!tcm->tcm_parent)
3260 q = rtnl_dereference(dev->qdisc);
3261 else
3262 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3263
3264 if (!q)
3265 goto out;
3266 cops = q->ops->cl_ops;
3267 if (!cops)
3268 goto out;
3269 if (!cops->tcf_block)
3270 goto out;
3271 if (TC_H_MIN(tcm->tcm_parent)) {
3272 cl = cops->find(q, tcm->tcm_parent);
3273 if (cl == 0)
3274 goto out;
3275 }
3276 block = cops->tcf_block(q, cl, NULL);
3277 if (!block)
3278 goto out;
3279 if (tcf_block_shared(block))
3280 q = NULL;
3281 }
3282
3283 index_start = cb->args[0];
3284 index = 0;
3285
3286 mutex_lock(&block->lock);
3287 list_for_each_entry(chain, &block->chain_list, list) {
3288 if ((tca[TCA_CHAIN] &&
3289 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3290 continue;
3291 if (index < index_start) {
3292 index++;
3293 continue;
3294 }
3295 if (tcf_chain_held_by_acts_only(chain))
3296 continue;
3297 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3298 chain->index, net, skb, block,
3299 NETLINK_CB(cb->skb).portid,
3300 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3301 RTM_NEWCHAIN, NULL);
3302 if (err <= 0)
3303 break;
3304 index++;
3305 }
3306 mutex_unlock(&block->lock);
3307
3308 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3309 tcf_block_refcnt_put(block, true);
3310 cb->args[0] = index;
3311
3312 out:
3313 /* If we made no progress, the error (EMSGSIZE) is real */
3314 if (skb->len == 0 && err)
3315 return err;
3316 return skb->len;
3317 }
3318
3319 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3320 int police, struct tcf_proto *tp, u32 handle,
3321 bool use_action_miss)
3322 {
3323 int err = 0;
3324
3325 #ifdef CONFIG_NET_CLS_ACT
3326 exts->type = 0;
3327 exts->nr_actions = 0;
3328 exts->miss_cookie_node = NULL;
3329 /* Note: we do not yet own a reference on net.
3330 * This reference might be taken later from tcf_exts_get_net().
3331 */
3332 exts->net = net;
3333 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3334 GFP_KERNEL);
3335 if (!exts->actions)
3336 return -ENOMEM;
3337 #endif
3338
3339 exts->action = action;
3340 exts->police = police;
3341
3342 if (!use_action_miss)
3343 return 0;
3344
3345 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3346 if (err)
3347 goto err_miss_alloc;
3348
3349 return 0;
3350
3351 err_miss_alloc:
3352 tcf_exts_destroy(exts);
3353 #ifdef CONFIG_NET_CLS_ACT
3354 exts->actions = NULL;
3355 #endif
3356 return err;
3357 }
3358 EXPORT_SYMBOL(tcf_exts_init_ex);
3359
3360 void tcf_exts_destroy(struct tcf_exts *exts)
3361 {
3362 tcf_exts_miss_cookie_base_destroy(exts);
3363
3364 #ifdef CONFIG_NET_CLS_ACT
3365 if (exts->actions) {
3366 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3367 kfree(exts->actions);
3368 }
3369 exts->nr_actions = 0;
3370 #endif
3371 }
3372 EXPORT_SYMBOL(tcf_exts_destroy);
3373
3374 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3375 struct nlattr *rate_tlv, struct tcf_exts *exts,
3376 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3377 {
3378 #ifdef CONFIG_NET_CLS_ACT
3379 {
3380 int init_res[TCA_ACT_MAX_PRIO] = {};
3381 struct tc_action *act;
3382 size_t attr_size = 0;
3383
3384 if (exts->police && tb[exts->police]) {
3385 struct tc_action_ops *a_o;
3386
3387 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3388 a_o = tc_action_load_ops(tb[exts->police], flags,
3389 extack);
3390 if (IS_ERR(a_o))
3391 return PTR_ERR(a_o);
3392 act = tcf_action_init_1(net, tp, tb[exts->police],
3393 rate_tlv, a_o, init_res, flags,
3394 extack);
3395 module_put(a_o->owner);
3396 if (IS_ERR(act))
3397 return PTR_ERR(act);
3398
3399 act->type = exts->type = TCA_OLD_COMPAT;
3400 exts->actions[0] = act;
3401 exts->nr_actions = 1;
3402 tcf_idr_insert_many(exts->actions, init_res);
3403 } else if (exts->action && tb[exts->action]) {
3404 int err;
3405
3406 flags |= TCA_ACT_FLAGS_BIND;
3407 err = tcf_action_init(net, tp, tb[exts->action],
3408 rate_tlv, exts->actions, init_res,
3409 &attr_size, flags, fl_flags,
3410 extack);
3411 if (err < 0)
3412 return err;
3413 exts->nr_actions = err;
3414 }
3415 }
3416 #else
3417 if ((exts->action && tb[exts->action]) ||
3418 (exts->police && tb[exts->police])) {
3419 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3420 return -EOPNOTSUPP;
3421 }
3422 #endif
3423
3424 return 0;
3425 }
3426 EXPORT_SYMBOL(tcf_exts_validate_ex);
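/* Two attribute layouts are handled above: the legacy single police action
 * (exts->police, marked TCA_OLD_COMPAT) and the generic nested action list
 * (exts->action), which is how modern classifiers such as flower pass
 * their TCA_*_ACT attribute.
 */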
3427
3428 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3429 struct nlattr *rate_tlv, struct tcf_exts *exts,
3430 u32 flags, struct netlink_ext_ack *extack)
3431 {
3432 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3433 flags, 0, extack);
3434 }
3435 EXPORT_SYMBOL(tcf_exts_validate);
3436
3437 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3438 {
3439 #ifdef CONFIG_NET_CLS_ACT
3440 struct tcf_exts old = *dst;
3441
3442 *dst = *src;
3443 tcf_exts_destroy(&old);
3444 #endif
3445 }
3446 EXPORT_SYMBOL(tcf_exts_change);
3447
3448 #ifdef CONFIG_NET_CLS_ACT
3449 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3450 {
3451 if (exts->nr_actions == 0)
3452 return NULL;
3453 else
3454 return exts->actions[0];
3455 }
3456 #endif
3457
3458 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3459 {
3460 #ifdef CONFIG_NET_CLS_ACT
3461 struct nlattr *nest;
3462
3463 if (exts->action && tcf_exts_has_actions(exts)) {
3464 /*
3465 * Again for backward-compatible mode: we want
3466 * to work with both old and new modes of entering
3467 * tc data even if iproute2 was newer - jhs
3468 */
3469 if (exts->type != TCA_OLD_COMPAT) {
3470 nest = nla_nest_start_noflag(skb, exts->action);
3471 if (nest == NULL)
3472 goto nla_put_failure;
3473
3474 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3475 < 0)
3476 goto nla_put_failure;
3477 nla_nest_end(skb, nest);
3478 } else if (exts->police) {
3479 struct tc_action *act = tcf_exts_first_act(exts);
3480 nest = nla_nest_start_noflag(skb, exts->police);
3481 if (nest == NULL || !act)
3482 goto nla_put_failure;
3483 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3484 goto nla_put_failure;
3485 nla_nest_end(skb, nest);
3486 }
3487 }
3488 return 0;
3489
3490 nla_put_failure:
3491 nla_nest_cancel(skb, nest);
3492 return -1;
3493 #else
3494 return 0;
3495 #endif
3496 }
3497 EXPORT_SYMBOL(tcf_exts_dump);
3498
3499 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3500 {
3501 #ifdef CONFIG_NET_CLS_ACT
3502 struct nlattr *nest;
3503
3504 if (!exts->action || !tcf_exts_has_actions(exts))
3505 return 0;
3506
3507 nest = nla_nest_start_noflag(skb, exts->action);
3508 if (!nest)
3509 goto nla_put_failure;
3510
3511 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3512 goto nla_put_failure;
3513 nla_nest_end(skb, nest);
3514 return 0;
3515
3516 nla_put_failure:
3517 nla_nest_cancel(skb, nest);
3518 return -1;
3519 #else
3520 return 0;
3521 #endif
3522 }
3523 EXPORT_SYMBOL(tcf_exts_terse_dump);
3524
3525 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3526 {
3527 #ifdef CONFIG_NET_CLS_ACT
3528 struct tc_action *a = tcf_exts_first_act(exts);
3529 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3530 return -1;
3531 #endif
3532 return 0;
3533 }
3534 EXPORT_SYMBOL(tcf_exts_dump_stats);
3535
3536 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3537 {
3538 if (*flags & TCA_CLS_FLAGS_IN_HW)
3539 return;
3540 *flags |= TCA_CLS_FLAGS_IN_HW;
3541 atomic_inc(&block->offloadcnt);
3542 }
3543
3544 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3545 {
3546 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3547 return;
3548 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3549 atomic_dec(&block->offloadcnt);
3550 }
3551
3552 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3553 struct tcf_proto *tp, u32 *cnt,
3554 u32 *flags, u32 diff, bool add)
3555 {
3556 lockdep_assert_held(&block->cb_lock);
3557
3558 spin_lock(&tp->lock);
3559 if (add) {
3560 if (!*cnt)
3561 tcf_block_offload_inc(block, flags);
3562 *cnt += diff;
3563 } else {
3564 *cnt -= diff;
3565 if (!*cnt)
3566 tcf_block_offload_dec(block, flags);
3567 }
3568 spin_unlock(&tp->lock);
3569 }
3570
3571 static void
3572 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3573 u32 *cnt, u32 *flags)
3574 {
3575 lockdep_assert_held(&block->cb_lock);
3576
3577 spin_lock(&tp->lock);
3578 tcf_block_offload_dec(block, flags);
3579 *cnt = 0;
3580 spin_unlock(&tp->lock);
3581 }
3582
3583 static int
3584 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3585 void *type_data, bool err_stop)
3586 {
3587 struct flow_block_cb *block_cb;
3588 int ok_count = 0;
3589 int err;
3590
3591 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3592 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3593 if (err) {
3594 if (err_stop)
3595 return err;
3596 } else {
3597 ok_count++;
3598 }
3599 }
3600 return ok_count;
3601 }
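/* Returns the number of callbacks that accepted the request or, when
 * err_stop is set, the first error; ok_count == 0 means "nothing was
 * offloaded" and is not treated as a failure by the callers below.
 */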
3602
3603 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3604 void *type_data, bool err_stop, bool rtnl_held)
3605 {
3606 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3607 int ok_count;
3608
3609 retry:
3610 if (take_rtnl)
3611 rtnl_lock();
3612 down_read(&block->cb_lock);
3613 /* Need to obtain rtnl lock if block is bound to devs that require it.
3614 * In block bind code cb_lock is obtained while holding rtnl, so we must
3615 * obtain the locks in the same order here.
3616 */
3617 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3618 up_read(&block->cb_lock);
3619 take_rtnl = true;
3620 goto retry;
3621 }
3622
3623 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3624
3625 up_read(&block->cb_lock);
3626 if (take_rtnl)
3627 rtnl_unlock();
3628 return ok_count;
3629 }
3630 EXPORT_SYMBOL(tc_setup_cb_call);
3631
3632 /* Non-destructive filter add. If a filter that wasn't already in hardware
3633 * is successfully offloaded, increment the block offloads counter. On
3634 * failure, a previously offloaded filter is considered to be intact and the
3635 * offloads counter is not decremented.
3636 */
3637
3638 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3639 enum tc_setup_type type, void *type_data, bool err_stop,
3640 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3641 {
3642 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3643 int ok_count;
3644
3645 retry:
3646 if (take_rtnl)
3647 rtnl_lock();
3648 down_read(&block->cb_lock);
3649 /* Need to obtain rtnl lock if block is bound to devs that require it.
3650 * In block bind code cb_lock is obtained while holding rtnl, so we must
3651 * obtain the locks in the same order here.
3652 */
3653 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3654 up_read(&block->cb_lock);
3655 take_rtnl = true;
3656 goto retry;
3657 }
3658
3659 /* Make sure all netdevs sharing this block are offload-capable. */
3660 if (block->nooffloaddevcnt && err_stop) {
3661 ok_count = -EOPNOTSUPP;
3662 goto err_unlock;
3663 }
3664
3665 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3666 if (ok_count < 0)
3667 goto err_unlock;
3668
3669 if (tp->ops->hw_add)
3670 tp->ops->hw_add(tp, type_data);
3671 if (ok_count > 0)
3672 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3673 ok_count, true);
3674 err_unlock:
3675 up_read(&block->cb_lock);
3676 if (take_rtnl)
3677 rtnl_unlock();
3678 return min(ok_count, 0);
3679 }
3680 EXPORT_SYMBOL(tc_setup_cb_add);
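
/* Illustrative sketch only: a classifier's hardware-replace path would
 * typically fill a type-specific descriptor and hand it to
 * tc_setup_cb_add(). Loosely modeled on the flower offload path;
 * my_hw_add_filter(), struct my_filter and its fields are hypothetical.
 *
 *	static int my_hw_add_filter(struct tcf_block *block,
 *				    struct tcf_proto *tp,
 *				    struct my_filter *f, bool rtnl_held)
 *	{
 *		struct flow_cls_offload cls = {};
 *
 *		cls.command = FLOW_CLS_REPLACE;
 *		cls.cookie = (unsigned long)f;
 *		// err_stop mirrors "skip_sw": if software fallback is
 *		// forbidden, a single callback failure must fail the add.
 *		return tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls,
 *				       tc_skip_sw(f->flags), &f->flags,
 *				       &f->in_hw_count, rtnl_held);
 *	}
 */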

/* Destructive filter replace. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block's offload counter. On failure,
 * the previously offloaded filter is considered to be destroyed and the
 * offload counter is decremented.
 */
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);
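
/* Design note: unlike tc_setup_cb_add(), the replace variant first zeroes
 * the old counters and drops the old hardware state, then re-offloads under
 * the new state. The "destructive" semantics described above follow from
 * this ordering: once the old entry has been torn down, a failed re-offload
 * cannot restore it, so the old filter is accounted as gone either way.
 */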

/* Destroy filter and decrement the block's offload counter, if the filter
 * was previously offloaded.
 */
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we
	 * must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
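
/* Illustrative sketch only: tc_setup_cb_reoffload() targets a single
 * callback rather than the whole block. It is meant for a classifier's
 * ->reoffload() op, which replays existing filters when a driver binds to
 * or unbinds from an already-populated block. my_reoffload(), struct
 * my_filter and the for_each_my_filter() iterator are hypothetical.
 *
 *	static int my_reoffload(struct tcf_proto *tp, bool add,
 *				flow_setup_cb_t *cb, void *cb_priv)
 *	{
 *		struct tcf_block *block = tp->chain->block;
 *		struct my_filter *f;
 *		int err;
 *
 *		for_each_my_filter(tp, f) {
 *			struct flow_cls_offload cls = {};
 *
 *			cls.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
 *			cls.cookie = (unsigned long)f;
 *			err = tc_setup_cb_reoffload(block, tp, add, cb,
 *						    TC_SETUP_CLSFLOWER, &cls,
 *						    cb_priv, &f->flags,
 *						    &f->in_hw_count);
 *			if (err)
 *				return err;
 *		}
 *		return 0;
 *	}
 */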

static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
				   const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	int err = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie) {
		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
							       user_cookie->len,
							       GFP_ATOMIC);
		if (!entry->user_cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->user_cookie);
}

void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_user_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);

static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}

int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_user_cookie(entry, act);
		if (err)
			goto err_out_locked;

		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		/* A single tc action may expand into several flow_action
		 * entries; propagate the shared metadata to each one.
		 */
		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
			entry[k].cookie = (unsigned long)act;
			entry[k].miss_cookie =
				tcf_exts_miss_cookie_get(miss_cookie_base, i);
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	u32 miss_cookie_base;

	if (!exts)
		return 0;

	miss_cookie_base = exts->miss_cookie_node ?
			   exts->miss_cookie_node->miss_cookie_base : 0;
	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
			       extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
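
/* Illustrative sketch only: a classifier typically sizes the flow_rule from
 * tcf_exts_num_actions() (note the per-key expansion of pedit above) and
 * then translates the exts' actions with tc_setup_offload_action().
 * my_build_rule() is a hypothetical helper.
 *
 *	static struct flow_rule *my_build_rule(struct tcf_exts *exts,
 *					       struct netlink_ext_ack *extack)
 *	{
 *		struct flow_rule *rule;
 *		int err;
 *
 *		rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *		if (!rule)
 *			return ERR_PTR(-ENOMEM);
 *
 *		err = tc_setup_offload_action(&rule->action, exts, extack);
 *		if (err) {
 *			kfree(rule);
 *			return ERR_PTR(err);
 *		}
 *		return rule;
 *	}
 */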

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Bounce a newly-configured block or a change of block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);
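
/* Illustrative sketch only: qevents let a qdisc run a filter block at a
 * specific event point (e.g. sch_red attaches blocks to its early-drop and
 * mark events). Roughly, a qdisc wires the pieces together as below;
 * q->qe_my_event and TCA_MYQDISC_QEVENT_BLOCK are hypothetical names.
 *
 *	// At init/change time, bind the block given by the netlink attr:
 *	err = tcf_qevent_init(&q->qe_my_event, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_MYQDISC_QEVENT_BLOCK], extack);
 *
 *	// In the datapath, at the event point:
 *	skb = tcf_qevent_handle(&q->qe_my_event, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;	// classified to drop/steal/redirect
 *
 *	// At destroy time, and when dumping:
 *	tcf_qevent_destroy(&q->qe_my_event, sch);
 *	tcf_qevent_dump(skb, TCA_MYQDISC_QEVENT_BLOCK, &q->qe_my_event);
 */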

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);