// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
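
/* Example (illustrative sketch, not part of the original file): a
 * classifier module registers its ops at init time and unregisters
 * them at exit. The "cls_mini"/"mini_*" names below are hypothetical;
 * the callbacks listed are the usual minimum a classifier provides.
 */
#if 0
static struct tcf_proto_ops cls_mini_ops __read_mostly = {
	.kind		= "mini",
	.classify	= mini_classify,	/* hypothetical callbacks */
	.init		= mini_init,
	.destroy	= mini_destroy,
	.get		= mini_get,
	.change		= mini_change,
	.delete		= mini_delete,
	.walk		= mini_walk,
	.dump		= mini_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_mini_init_module(void)
{
	return register_tcf_proto_ops(&cls_mini_ops);
}

static void __exit cls_mini_exit_module(void)
{
	unregister_tcf_proto_ops(&cls_mini_ops);
}

module_init(cls_mini_init_module);
module_exit(cls_mini_exit_module);
#endif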

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
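
/* Example (sketch, not from this file): classifiers typically embed a
 * struct rcu_work in each filter and use tcf_queue_work() to free the
 * filter only after an RCU grace period, so concurrent readers walking
 * the chain never touch freed memory. "mini_filter" is hypothetical.
 */
#if 0
struct mini_filter {
	struct tcf_result res;
	struct rcu_work rwork;
};

static void mini_filter_free_work(struct work_struct *work)
{
	struct mini_filter *f = container_of(to_rcu_work(work),
					     struct mini_filter, rwork);

	/* Grace period has elapsed; no RCU reader can still see f. */
	kfree(f);
}

static void mini_filter_delete(struct mini_filter *f)
{
	/* Unlink f from the chain first, then defer the actual free. */
	tcf_queue_work(&f->rwork, mini_filter_free_work);
}
#endif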

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
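
/* Worked example (added note): tp->prio keeps the 16-bit user priority
 * in its upper half. With no filters installed, the result above is
 * TC_H_MAJ(0xC0000000), i.e. user prio 0xC000. If the current head has
 * user prio 0x8000 (tp->prio == 0x80000000), then first == 0x7fffffff
 * and TC_H_MAJ() masks it to 0x7fff0000: the new filter gets prio
 * 0x7fff, placing it just before the old head in match order.
 */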

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
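
/* Example (sketch): this is the pair the act API relies on when an
 * action such as "goto chain" must pin a chain that may not have any
 * filters yet. "mini_act_bind_chain" is a hypothetical helper,
 * simplified from the real usage in act_api.c.
 */
#if 0
static int mini_act_bind_chain(struct tcf_block *block, u32 chain_index,
			       struct tcf_chain **p_goto_chain)
{
	/* Creates the chain on demand; the reference is counted in
	 * action_refcnt, so the chain stays invisible to the user
	 * until it also gains a non-action reference.
	 */
	*p_goto_chain = tcf_chain_get_by_act(block, chain_index);
	if (!*p_goto_chain)
		return -ENOMEM;

	/* Released later with tcf_chain_put_by_act(*p_goto_chain). */
	return 0;
}
#endif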

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0,
					       false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we try to
	 * bind to already has some offloaded filters, forbid binding.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
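
/* Example (sketch): the canonical iteration pattern. Passing the
 * previous chain back releases its reference, so a caller that breaks
 * out early must put the last chain itself. tcf_block_flush_all_chains()
 * below uses exactly this loop. "mini_walk_chains" is hypothetical.
 */
#if 0
static void mini_walk_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		/* ... use chain; a reference is held across the body ... */
	}
}
#endif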

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
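
/* Example (sketch): walking every tp on a chain mirrors the chain walk
 * above; the helper puts the tp passed in and returns the next one
 * already referenced. tfilter_notify_chain() below is a real user.
 * "mini_walk_protos" is hypothetical.
 */
#if 0
static void mini_walk_protos(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, true);
	     tp;
	     tp = tcf_get_next_proto(chain, tp, true)) {
		/* ... e.g. dump or notify for this tp ... */
	}
}
#endif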

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
1087 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
1095 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains a reference to the block
		 * by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
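
/* Example (sketch, hypothetical "mini" qdisc): a classful qdisc obtains
 * its block in ->init() and releases it in ->destroy(). The filter_list
 * pointer is kept current through the default chain0 head-change
 * callback installed above.
 */
#if 0
struct mini_sched_data {
	struct tcf_block *block;
	struct tcf_proto __rcu *filter_list;
};

static int mini_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct mini_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void mini_qdisc_destroy(struct Qdisc *sch)
{
	struct mini_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif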

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			{
				struct tc_skb_ext *ext;

				ext = skb_ext_add(skb, TC_SKB_EXT);
				if (WARN_ON_ONCE(!ext))
					return TC_ACT_SHOT;

				ext->chain = err & TC_ACT_EXT_VAL_MASK;
			}
#endif
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
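
/* Example (sketch): how a qdisc enqueue path typically consumes
 * tcf_classify(), modeled loosely on qdiscs such as fq_codel. Assumes
 * the hypothetical mini_sched_data from the tcf_block_get() example
 * above.
 */
#if 0
static u32 mini_classify_skb(struct sk_buff *skb, struct Qdisc *sch)
{
	struct mini_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return 0;	/* no filters: use default class */

	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_SHOT:
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			return 0;	/* dropped or consumed by action */
		}
#endif
		return TC_H_MIN(res.classid);
	}
	return 0;
}
#endif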
1624
1625 struct tcf_chain_info {
1626 struct tcf_proto __rcu **pprev;
1627 struct tcf_proto __rcu *next;
1628 };
1629
tcf_chain_tp_prev(struct tcf_chain * chain,struct tcf_chain_info * chain_info)1630 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1631 struct tcf_chain_info *chain_info)
1632 {
1633 return tcf_chain_dereference(*chain_info->pprev, chain);
1634 }
1635
tcf_chain_tp_insert(struct tcf_chain * chain,struct tcf_chain_info * chain_info,struct tcf_proto * tp)1636 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1637 struct tcf_chain_info *chain_info,
1638 struct tcf_proto *tp)
1639 {
1640 if (chain->flushing)
1641 return -EAGAIN;
1642
1643 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1644 if (*chain_info->pprev == chain->filter_chain)
1645 tcf_chain0_head_change(chain, tp);
1646 tcf_proto_get(tp);
1647 rcu_assign_pointer(*chain_info->pprev, tp);
1648
1649 return 0;
1650 }
1651
tcf_chain_tp_remove(struct tcf_chain * chain,struct tcf_chain_info * chain_info,struct tcf_proto * tp)1652 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1653 struct tcf_chain_info *chain_info,
1654 struct tcf_proto *tp)
1655 {
1656 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1657
1658 tcf_proto_mark_delete(tp);
1659 if (tp == chain->filter_chain)
1660 tcf_chain0_head_change(chain, next);
1661 RCU_INIT_POINTER(*chain_info->pprev, next);
1662 }
1663
1664 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1665 struct tcf_chain_info *chain_info,
1666 u32 protocol, u32 prio,
1667 bool prio_allocate);
1668
1669 /* Try to insert new proto.
1670 * If proto with specified priority already exists, free new proto
1671 * and return existing one.
1672 */
1673
tcf_chain_tp_insert_unique(struct tcf_chain * chain,struct tcf_proto * tp_new,u32 protocol,u32 prio,bool rtnl_held)1674 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1675 struct tcf_proto *tp_new,
1676 u32 protocol, u32 prio,
1677 bool rtnl_held)
1678 {
1679 struct tcf_chain_info chain_info;
1680 struct tcf_proto *tp;
1681 int err = 0;
1682
1683 mutex_lock(&chain->filter_chain_lock);
1684
1685 if (tcf_proto_exists_destroying(chain, tp_new)) {
1686 mutex_unlock(&chain->filter_chain_lock);
1687 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1688 return ERR_PTR(-EAGAIN);
1689 }
1690
1691 tp = tcf_chain_tp_find(chain, &chain_info,
1692 protocol, prio, false);
1693 if (!tp)
1694 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1695 mutex_unlock(&chain->filter_chain_lock);
1696
1697 if (tp) {
1698 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1699 tp_new = tp;
1700 } else if (err) {
1701 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1702 tp_new = ERR_PTR(err);
1703 }
1704
1705 return tp_new;
1706 }
1707
tcf_chain_tp_delete_empty(struct tcf_chain * chain,struct tcf_proto * tp,bool rtnl_held,struct netlink_ext_ack * extack)1708 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1709 struct tcf_proto *tp, bool rtnl_held,
1710 struct netlink_ext_ack *extack)
1711 {
1712 struct tcf_chain_info chain_info;
1713 struct tcf_proto *tp_iter;
1714 struct tcf_proto **pprev;
1715 struct tcf_proto *next;
1716
1717 mutex_lock(&chain->filter_chain_lock);
1718
1719 /* Atomically find and remove tp from chain. */
1720 for (pprev = &chain->filter_chain;
1721 (tp_iter = tcf_chain_dereference(*pprev, chain));
1722 pprev = &tp_iter->next) {
1723 if (tp_iter == tp) {
1724 chain_info.pprev = pprev;
1725 chain_info.next = tp_iter->next;
1726 WARN_ON(tp_iter->deleting);
1727 break;
1728 }
1729 }
1730 /* Verify that tp still exists and no new filters were inserted
1731 * concurrently.
1732 * Mark tp for deletion if it is empty.
1733 */
1734 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1735 mutex_unlock(&chain->filter_chain_lock);
1736 return;
1737 }
1738
1739 tcf_proto_signal_destroying(chain, tp);
1740 next = tcf_chain_dereference(chain_info.next, chain);
1741 if (tp == chain->filter_chain)
1742 tcf_chain0_head_change(chain, next);
1743 RCU_INIT_POINTER(*chain_info.pprev, next);
1744 mutex_unlock(&chain->filter_chain_lock);
1745
1746 tcf_proto_put(tp, rtnl_held, extack);
1747 }
1748
tcf_chain_tp_find(struct tcf_chain * chain,struct tcf_chain_info * chain_info,u32 protocol,u32 prio,bool prio_allocate)1749 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1750 struct tcf_chain_info *chain_info,
1751 u32 protocol, u32 prio,
1752 bool prio_allocate)
1753 {
1754 struct tcf_proto **pprev;
1755 struct tcf_proto *tp;
1756
1757 /* Check the chain for existence of proto-tcf with this priority */
1758 for (pprev = &chain->filter_chain;
1759 (tp = tcf_chain_dereference(*pprev, chain));
1760 pprev = &tp->next) {
1761 if (tp->prio >= prio) {
1762 if (tp->prio == prio) {
1763 if (prio_allocate ||
1764 (tp->protocol != protocol && protocol))
1765 return ERR_PTR(-EINVAL);
1766 } else {
1767 tp = NULL;
1768 }
1769 break;
1770 }
1771 }
1772 chain_info->pprev = pprev;
1773 if (tp) {
1774 chain_info->next = tp->next;
1775 tcf_proto_get(tp);
1776 } else {
1777 chain_info->next = NULL;
1778 }
1779 return tp;
1780 }
1781
tcf_fill_node(struct net * net,struct sk_buff * skb,struct tcf_proto * tp,struct tcf_block * block,struct Qdisc * q,u32 parent,void * fh,u32 portid,u32 seq,u16 flags,int event,bool rtnl_held)1782 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1783 struct tcf_proto *tp, struct tcf_block *block,
1784 struct Qdisc *q, u32 parent, void *fh,
1785 u32 portid, u32 seq, u16 flags, int event,
1786 bool rtnl_held)
1787 {
1788 struct tcmsg *tcm;
1789 struct nlmsghdr *nlh;
1790 unsigned char *b = skb_tail_pointer(skb);
1791
1792 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1793 if (!nlh)
1794 goto out_nlmsg_trim;
1795 tcm = nlmsg_data(nlh);
1796 tcm->tcm_family = AF_UNSPEC;
1797 tcm->tcm__pad1 = 0;
1798 tcm->tcm__pad2 = 0;
1799 if (q) {
1800 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1801 tcm->tcm_parent = parent;
1802 } else {
1803 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1804 tcm->tcm_block_index = block->index;
1805 }
1806 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1807 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1808 goto nla_put_failure;
1809 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1810 goto nla_put_failure;
1811 if (!fh) {
1812 tcm->tcm_handle = 0;
1813 } else {
1814 if (tp->ops->dump &&
1815 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1816 goto nla_put_failure;
1817 }
1818 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1819 return skb->len;
1820
1821 out_nlmsg_trim:
1822 nla_put_failure:
1823 nlmsg_trim(skb, b);
1824 return -1;
1825 }
1826
1827 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1828 struct nlmsghdr *n, struct tcf_proto *tp,
1829 struct tcf_block *block, struct Qdisc *q,
1830 u32 parent, void *fh, int event, bool unicast,
1831 bool rtnl_held)
1832 {
1833 struct sk_buff *skb;
1834 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1835 int err = 0;
1836
1837 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1838 if (!skb)
1839 return -ENOBUFS;
1840
1841 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1842 n->nlmsg_seq, n->nlmsg_flags, event,
1843 rtnl_held) <= 0) {
1844 kfree_skb(skb);
1845 return -EINVAL;
1846 }
1847
1848 if (unicast)
1849 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1850 else
1851 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1852 n->nlmsg_flags & NLM_F_ECHO);
1853
1854 if (err > 0)
1855 err = 0;
1856 return err;
1857 }
1858
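/* The RTM_DELTFILTER notification is built before ->delete() is
 * called: once the filter is gone, fh can no longer be dumped. The
 * skb is sent only if the deletion itself succeeds.
 */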
1859 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1860 struct nlmsghdr *n, struct tcf_proto *tp,
1861 struct tcf_block *block, struct Qdisc *q,
1862 u32 parent, void *fh, bool unicast, bool *last,
1863 bool rtnl_held, struct netlink_ext_ack *extack)
1864 {
1865 struct sk_buff *skb;
1866 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1867 int err;
1868
1869 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1870 if (!skb)
1871 return -ENOBUFS;
1872
1873 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1874 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1875 rtnl_held) <= 0) {
1876 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1877 kfree_skb(skb);
1878 return -EINVAL;
1879 }
1880
1881 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1882 if (err) {
1883 kfree_skb(skb);
1884 return err;
1885 }
1886
1887 if (unicast)
1888 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1889 else
1890 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1891 n->nlmsg_flags & NLM_F_ECHO);
1892 if (err < 0)
1893 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1894
1895 if (err > 0)
1896 err = 0;
1897 return err;
1898 }
1899
1900 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1901 struct tcf_block *block, struct Qdisc *q,
1902 u32 parent, struct nlmsghdr *n,
1903 struct tcf_chain *chain, int event,
1904 bool rtnl_held)
1905 {
1906 struct tcf_proto *tp;
1907
1908 for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1909 tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1910 tfilter_notify(net, oskb, n, tp, block,
1911 q, parent, NULL, event, false, rtnl_held);
1912 }
1913
1914 static void tfilter_put(struct tcf_proto *tp, void *fh)
1915 {
1916 if (tp->ops->put && fh)
1917 tp->ops->put(tp, fh);
1918 }
1919
1920 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1921 struct netlink_ext_ack *extack)
1922 {
1923 struct net *net = sock_net(skb->sk);
1924 struct nlattr *tca[TCA_MAX + 1];
1925 char name[IFNAMSIZ];
1926 struct tcmsg *t;
1927 u32 protocol;
1928 u32 prio;
1929 bool prio_allocate;
1930 u32 parent;
1931 u32 chain_index;
1932 struct Qdisc *q;
1933 struct tcf_chain_info chain_info;
1934 struct tcf_chain *chain;
1935 struct tcf_block *block;
1936 struct tcf_proto *tp;
1937 unsigned long cl;
1938 void *fh;
1939 int err;
1940 int tp_created;
1941 bool rtnl_held = false;
1942
1943 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1944 return -EPERM;
1945
1946 replay:
1947 tp_created = 0;
1948
1949 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1950 rtm_tca_policy, extack);
1951 if (err < 0)
1952 return err;
1953
1954 t = nlmsg_data(n);
1955 protocol = TC_H_MIN(t->tcm_info);
1956 prio = TC_H_MAJ(t->tcm_info);
1957 prio_allocate = false;
1958 parent = t->tcm_parent;
1959 tp = NULL;
1960 cl = 0;
1961 block = NULL;
1962 q = NULL;
1963 chain = NULL;
1964
1965 if (prio == 0) {
1966 /* If no priority is provided by the user,
1967 * we allocate one.
1968 */
1969 if (n->nlmsg_flags & NLM_F_CREATE) {
1970 prio = TC_H_MAKE(0x80000000U, 0U);
1971 prio_allocate = true;
1972 } else {
1973 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1974 return -ENOENT;
1975 }
1976 }
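/* The provisional prio set above only positions the chain lookup; when
 * prio_allocate is set, the final value is picked by tcf_auto_prio()
 * once the insertion point is known (see below).
 */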
1977
1978 /* Find head of filter chain. */
1979
1980 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1981 if (err)
1982 return err;
1983
1984 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1985 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
1986 err = -EINVAL;
1987 goto errout;
1988 }
1989
1990 /* Take the rtnl mutex if rtnl_held was set to true on a previous iteration,
1991 * the block is shared (no qdisc found), the qdisc is not unlocked, the
1992 * classifier type is not specified, or the classifier is not unlocked.
1993 */
1994 if (rtnl_held ||
1995 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1996 !tcf_proto_is_unlocked(name)) {
1997 rtnl_held = true;
1998 rtnl_lock();
1999 }
2000
2001 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2002 if (err)
2003 goto errout;
2004
2005 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2006 extack);
2007 if (IS_ERR(block)) {
2008 err = PTR_ERR(block);
2009 goto errout;
2010 }
2011 block->classid = parent;
2012
2013 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2014 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2015 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2016 err = -EINVAL;
2017 goto errout;
2018 }
2019 chain = tcf_chain_get(block, chain_index, true);
2020 if (!chain) {
2021 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2022 err = -ENOMEM;
2023 goto errout;
2024 }
2025
2026 mutex_lock(&chain->filter_chain_lock);
2027 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2028 prio, prio_allocate);
2029 if (IS_ERR(tp)) {
2030 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2031 err = PTR_ERR(tp);
2032 goto errout_locked;
2033 }
2034
2035 if (tp == NULL) {
2036 struct tcf_proto *tp_new = NULL;
2037
2038 if (chain->flushing) {
2039 err = -EAGAIN;
2040 goto errout_locked;
2041 }
2042
2043 /* Proto-tcf does not exist; create a new one */
2044
2045 if (tca[TCA_KIND] == NULL || !protocol) {
2046 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2047 err = -EINVAL;
2048 goto errout_locked;
2049 }
2050
2051 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2052 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2053 err = -ENOENT;
2054 goto errout_locked;
2055 }
2056
2057 if (prio_allocate)
2058 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2059 &chain_info));
2060
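/* Creating the proto may load a classifier module and sleep, so the
 * filter chain lock is dropped here. A concurrent insertion of the
 * same prio/protocol is resolved by tcf_chain_tp_insert_unique(),
 * which rechecks under the lock.
 */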
2061 mutex_unlock(&chain->filter_chain_lock);
2062 tp_new = tcf_proto_create(name, protocol, prio, chain,
2063 rtnl_held, extack);
2064 if (IS_ERR(tp_new)) {
2065 err = PTR_ERR(tp_new);
2066 goto errout_tp;
2067 }
2068
2069 tp_created = 1;
2070 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2071 rtnl_held);
2072 if (IS_ERR(tp)) {
2073 err = PTR_ERR(tp);
2074 goto errout_tp;
2075 }
2076 } else {
2077 mutex_unlock(&chain->filter_chain_lock);
2078 }
2079
2080 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2081 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2082 err = -EINVAL;
2083 goto errout;
2084 }
2085
2086 fh = tp->ops->get(tp, t->tcm_handle);
2087
2088 if (!fh) {
2089 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2090 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2091 err = -ENOENT;
2092 goto errout;
2093 }
2094 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2095 tfilter_put(tp, fh);
2096 NL_SET_ERR_MSG(extack, "Filter already exists");
2097 err = -EEXIST;
2098 goto errout;
2099 }
2100
2101 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2102 tfilter_put(tp, fh);
2103 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2104 err = -EINVAL;
2105 goto errout;
2106 }
2107
2108 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2109 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2110 rtnl_held, extack);
2111 if (err == 0) {
2112 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2113 RTM_NEWTFILTER, false, rtnl_held);
2114 tfilter_put(tp, fh);
2115 /* q pointer is NULL for shared blocks */
2116 if (q)
2117 q->flags &= ~TCQ_F_CAN_BYPASS;
2118 }
2119
2120 errout:
2121 if (err && tp_created)
2122 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2123 errout_tp:
2124 if (chain) {
2125 if (tp && !IS_ERR(tp))
2126 tcf_proto_put(tp, rtnl_held, NULL);
2127 if (!tp_created)
2128 tcf_chain_put(chain);
2129 }
2130 tcf_block_release(q, block, rtnl_held);
2131
2132 if (rtnl_held)
2133 rtnl_unlock();
2134
2135 if (err == -EAGAIN) {
2136 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2137 * of target chain.
2138 */
2139 rtnl_held = true;
2140 /* Replay the request. */
2141 goto replay;
2142 }
2143 return err;
2144
2145 errout_locked:
2146 mutex_unlock(&chain->filter_chain_lock);
2147 goto errout;
2148 }
2149
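/* Delete/flush entry point: prio == 0 (with no protocol, handle or
 * kind) flushes the whole chain, prio without a handle removes one
 * tcf_proto, and prio plus handle deletes a single filter.
 */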
2150 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2151 struct netlink_ext_ack *extack)
2152 {
2153 struct net *net = sock_net(skb->sk);
2154 struct nlattr *tca[TCA_MAX + 1];
2155 char name[IFNAMSIZ];
2156 struct tcmsg *t;
2157 u32 protocol;
2158 u32 prio;
2159 u32 parent;
2160 u32 chain_index;
2161 struct Qdisc *q = NULL;
2162 struct tcf_chain_info chain_info;
2163 struct tcf_chain *chain = NULL;
2164 struct tcf_block *block = NULL;
2165 struct tcf_proto *tp = NULL;
2166 unsigned long cl = 0;
2167 void *fh = NULL;
2168 int err;
2169 bool rtnl_held = false;
2170
2171 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2172 return -EPERM;
2173
2174 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2175 rtm_tca_policy, extack);
2176 if (err < 0)
2177 return err;
2178
2179 t = nlmsg_data(n);
2180 protocol = TC_H_MIN(t->tcm_info);
2181 prio = TC_H_MAJ(t->tcm_info);
2182 parent = t->tcm_parent;
2183
2184 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2185 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2186 return -ENOENT;
2187 }
2188
2189 /* Find head of filter chain. */
2190
2191 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2192 if (err)
2193 return err;
2194
2195 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2196 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2197 err = -EINVAL;
2198 goto errout;
2199 }
2200 /* Take the rtnl mutex if flushing the whole chain, the block is shared
2201 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2202 * not specified, or the classifier is not unlocked.
2203 */
2204 if (!prio ||
2205 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2206 !tcf_proto_is_unlocked(name)) {
2207 rtnl_held = true;
2208 rtnl_lock();
2209 }
2210
2211 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2212 if (err)
2213 goto errout;
2214
2215 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2216 extack);
2217 if (IS_ERR(block)) {
2218 err = PTR_ERR(block);
2219 goto errout;
2220 }
2221
2222 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2223 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2224 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2225 err = -EINVAL;
2226 goto errout;
2227 }
2228 chain = tcf_chain_get(block, chain_index, false);
2229 if (!chain) {
2230 /* User requested flush on non-existent chain. Nothing to do,
2231 * so just return success.
2232 */
2233 if (prio == 0) {
2234 err = 0;
2235 goto errout;
2236 }
2237 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2238 err = -ENOENT;
2239 goto errout;
2240 }
2241
2242 if (prio == 0) {
2243 tfilter_notify_chain(net, skb, block, q, parent, n,
2244 chain, RTM_DELTFILTER, rtnl_held);
2245 tcf_chain_flush(chain, rtnl_held);
2246 err = 0;
2247 goto errout;
2248 }
2249
2250 mutex_lock(&chain->filter_chain_lock);
2251 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2252 prio, false);
2253 if (!tp || IS_ERR(tp)) {
2254 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2255 err = tp ? PTR_ERR(tp) : -ENOENT;
2256 goto errout_locked;
2257 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2258 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2259 err = -EINVAL;
2260 goto errout_locked;
2261 } else if (t->tcm_handle == 0) {
2262 tcf_proto_signal_destroying(chain, tp);
2263 tcf_chain_tp_remove(chain, &chain_info, tp);
2264 mutex_unlock(&chain->filter_chain_lock);
2265
2266 tcf_proto_put(tp, rtnl_held, NULL);
2267 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2268 RTM_DELTFILTER, false, rtnl_held);
2269 err = 0;
2270 goto errout;
2271 }
2272 mutex_unlock(&chain->filter_chain_lock);
2273
2274 fh = tp->ops->get(tp, t->tcm_handle);
2275
2276 if (!fh) {
2277 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2278 err = -ENOENT;
2279 } else {
2280 bool last;
2281
2282 err = tfilter_del_notify(net, skb, n, tp, block,
2283 q, parent, fh, false, &last,
2284 rtnl_held, extack);
2285
2286 if (err)
2287 goto errout;
2288 if (last)
2289 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2290 }
2291
2292 errout:
2293 if (chain) {
2294 if (tp && !IS_ERR(tp))
2295 tcf_proto_put(tp, rtnl_held, NULL);
2296 tcf_chain_put(chain);
2297 }
2298 tcf_block_release(q, block, rtnl_held);
2299
2300 if (rtnl_held)
2301 rtnl_unlock();
2302
2303 return err;
2304
2305 errout_locked:
2306 mutex_unlock(&chain->filter_chain_lock);
2307 goto errout;
2308 }
2309
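/* Get a single filter. The reply reuses RTM_NEWTFILTER and is sent
 * back via unicast rather than the RTNLGRP_TC multicast group.
 */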
2310 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2311 struct netlink_ext_ack *extack)
2312 {
2313 struct net *net = sock_net(skb->sk);
2314 struct nlattr *tca[TCA_MAX + 1];
2315 char name[IFNAMSIZ];
2316 struct tcmsg *t;
2317 u32 protocol;
2318 u32 prio;
2319 u32 parent;
2320 u32 chain_index;
2321 struct Qdisc *q = NULL;
2322 struct tcf_chain_info chain_info;
2323 struct tcf_chain *chain = NULL;
2324 struct tcf_block *block = NULL;
2325 struct tcf_proto *tp = NULL;
2326 unsigned long cl = 0;
2327 void *fh = NULL;
2328 int err;
2329 bool rtnl_held = false;
2330
2331 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2332 rtm_tca_policy, extack);
2333 if (err < 0)
2334 return err;
2335
2336 t = nlmsg_data(n);
2337 protocol = TC_H_MIN(t->tcm_info);
2338 prio = TC_H_MAJ(t->tcm_info);
2339 parent = t->tcm_parent;
2340
2341 if (prio == 0) {
2342 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2343 return -ENOENT;
2344 }
2345
2346 /* Find head of filter chain. */
2347
2348 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2349 if (err)
2350 return err;
2351
2352 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2353 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2354 err = -EINVAL;
2355 goto errout;
2356 }
2357 /* Take the rtnl mutex if the block is shared (no qdisc found), the
2358 * qdisc is not unlocked, the classifier type is not specified, or the
2359 * classifier is not unlocked.
2360 */
2361 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2362 !tcf_proto_is_unlocked(name)) {
2363 rtnl_held = true;
2364 rtnl_lock();
2365 }
2366
2367 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2368 if (err)
2369 goto errout;
2370
2371 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2372 extack);
2373 if (IS_ERR(block)) {
2374 err = PTR_ERR(block);
2375 goto errout;
2376 }
2377
2378 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2379 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2380 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2381 err = -EINVAL;
2382 goto errout;
2383 }
2384 chain = tcf_chain_get(block, chain_index, false);
2385 if (!chain) {
2386 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2387 err = -EINVAL;
2388 goto errout;
2389 }
2390
2391 mutex_lock(&chain->filter_chain_lock);
2392 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2393 prio, false);
2394 mutex_unlock(&chain->filter_chain_lock);
2395 if (!tp || IS_ERR(tp)) {
2396 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2397 err = tp ? PTR_ERR(tp) : -ENOENT;
2398 goto errout;
2399 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2400 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2401 err = -EINVAL;
2402 goto errout;
2403 }
2404
2405 fh = tp->ops->get(tp, t->tcm_handle);
2406
2407 if (!fh) {
2408 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2409 err = -ENOENT;
2410 } else {
2411 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2412 fh, RTM_NEWTFILTER, true, rtnl_held);
2413 if (err < 0)
2414 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2415 }
2416
2417 tfilter_put(tp, fh);
2418 errout:
2419 if (chain) {
2420 if (tp && !IS_ERR(tp))
2421 tcf_proto_put(tp, rtnl_held, NULL);
2422 tcf_chain_put(chain);
2423 }
2424 tcf_block_release(q, block, rtnl_held);
2425
2426 if (rtnl_held)
2427 rtnl_unlock();
2428
2429 return err;
2430 }
2431
2432 struct tcf_dump_args {
2433 struct tcf_walker w;
2434 struct sk_buff *skb;
2435 struct netlink_callback *cb;
2436 struct tcf_block *block;
2437 struct Qdisc *q;
2438 u32 parent;
2439 };
2440
2441 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2442 {
2443 struct tcf_dump_args *a = (void *)arg;
2444 struct net *net = sock_net(a->skb->sk);
2445
2446 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2447 n, NETLINK_CB(a->cb->skb).portid,
2448 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2449 RTM_NEWTFILTER, true);
2450 }
2451
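/* Dump one chain. The netlink_callback slots are used as follows:
 * args[0] is the global filter index across invocations, args[1]
 * tracks progress within the current tcf_proto (walker skip count
 * plus one), and args[2] carries the classifier walk cookie.
 */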
2452 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2453 struct sk_buff *skb, struct netlink_callback *cb,
2454 long index_start, long *p_index)
2455 {
2456 struct net *net = sock_net(skb->sk);
2457 struct tcf_block *block = chain->block;
2458 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2459 struct tcf_proto *tp, *tp_prev;
2460 struct tcf_dump_args arg;
2461
2462 for (tp = __tcf_get_next_proto(chain, NULL);
2463 tp;
2464 tp_prev = tp,
2465 tp = __tcf_get_next_proto(chain, tp),
2466 tcf_proto_put(tp_prev, true, NULL),
2467 (*p_index)++) {
2468 if (*p_index < index_start)
2469 continue;
2470 if (TC_H_MAJ(tcm->tcm_info) &&
2471 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2472 continue;
2473 if (TC_H_MIN(tcm->tcm_info) &&
2474 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2475 continue;
2476 if (*p_index > index_start)
2477 memset(&cb->args[1], 0,
2478 sizeof(cb->args) - sizeof(cb->args[0]));
2479 if (cb->args[1] == 0) {
2480 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2481 NETLINK_CB(cb->skb).portid,
2482 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2483 RTM_NEWTFILTER, true) <= 0)
2484 goto errout;
2485 cb->args[1] = 1;
2486 }
2487 if (!tp->ops->walk)
2488 continue;
2489 arg.w.fn = tcf_node_dump;
2490 arg.skb = skb;
2491 arg.cb = cb;
2492 arg.block = block;
2493 arg.q = q;
2494 arg.parent = parent;
2495 arg.w.stop = 0;
2496 arg.w.skip = cb->args[1] - 1;
2497 arg.w.count = 0;
2498 arg.w.cookie = cb->args[2];
2499 tp->ops->walk(tp, &arg.w, true);
2500 cb->args[2] = arg.w.cookie;
2501 cb->args[1] = arg.w.count + 1;
2502 if (arg.w.stop)
2503 goto errout;
2504 }
2505 return true;
2506
2507 errout:
2508 tcf_proto_put(tp, true, NULL);
2509 return false;
2510 }
2511
2512 /* called with RTNL */
2513 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2514 {
2515 struct tcf_chain *chain, *chain_prev;
2516 struct net *net = sock_net(skb->sk);
2517 struct nlattr *tca[TCA_MAX + 1];
2518 struct Qdisc *q = NULL;
2519 struct tcf_block *block;
2520 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2521 long index_start;
2522 long index;
2523 u32 parent;
2524 int err;
2525
2526 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2527 return skb->len;
2528
2529 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2530 NULL, cb->extack);
2531 if (err)
2532 return err;
2533
2534 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2535 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2536 if (!block)
2537 goto out;
2538 /* If we work with a block index, q is NULL and the parent value
2539 * will never be used in the following code. The check
2540 * in tcf_fill_node prevents it. However, the compiler cannot
2541 * see that far, so set parent to zero to silence the warning
2542 * about parent being used uninitialized.
2543 */
2544 parent = 0;
2545 } else {
2546 const struct Qdisc_class_ops *cops;
2547 struct net_device *dev;
2548 unsigned long cl = 0;
2549
2550 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2551 if (!dev)
2552 return skb->len;
2553
2554 parent = tcm->tcm_parent;
2555 if (!parent)
2556 q = rtnl_dereference(dev->qdisc);
2557 else
2558 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2559 if (!q)
2560 goto out;
2561 cops = q->ops->cl_ops;
2562 if (!cops)
2563 goto out;
2564 if (!cops->tcf_block)
2565 goto out;
2566 if (TC_H_MIN(tcm->tcm_parent)) {
2567 cl = cops->find(q, tcm->tcm_parent);
2568 if (cl == 0)
2569 goto out;
2570 }
2571 block = cops->tcf_block(q, cl, NULL);
2572 if (!block)
2573 goto out;
2574 parent = block->classid;
2575 if (tcf_block_shared(block))
2576 q = NULL;
2577 }
2578
2579 index_start = cb->args[0];
2580 index = 0;
2581
2582 for (chain = __tcf_get_next_chain(block, NULL);
2583 chain;
2584 chain_prev = chain,
2585 chain = __tcf_get_next_chain(block, chain),
2586 tcf_chain_put(chain_prev)) {
2587 if (tca[TCA_CHAIN] &&
2588 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2589 continue;
2590 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2591 index_start, &index)) {
2592 tcf_chain_put(chain);
2593 err = -EMSGSIZE;
2594 break;
2595 }
2596 }
2597
2598 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2599 tcf_block_refcnt_put(block, true);
2600 cb->args[0] = index;
2601
2602 out:
2603 /* If we made no progress, the error (EMSGSIZE) is real */
2604 if (skb->len == 0 && err)
2605 return err;
2606 return skb->len;
2607 }
2608
2609 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2610 void *tmplt_priv, u32 chain_index,
2611 struct net *net, struct sk_buff *skb,
2612 struct tcf_block *block,
2613 u32 portid, u32 seq, u16 flags, int event)
2614 {
2615 unsigned char *b = skb_tail_pointer(skb);
2616 const struct tcf_proto_ops *ops;
2617 struct nlmsghdr *nlh;
2618 struct tcmsg *tcm;
2619 void *priv;
2620
2621 ops = tmplt_ops;
2622 priv = tmplt_priv;
2623
2624 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2625 if (!nlh)
2626 goto out_nlmsg_trim;
2627 tcm = nlmsg_data(nlh);
2628 tcm->tcm_family = AF_UNSPEC;
2629 tcm->tcm__pad1 = 0;
2630 tcm->tcm__pad2 = 0;
2631 tcm->tcm_handle = 0;
2632 if (block->q) {
2633 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2634 tcm->tcm_parent = block->q->handle;
2635 } else {
2636 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2637 tcm->tcm_block_index = block->index;
2638 }
2639
2640 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2641 goto nla_put_failure;
2642
2643 if (ops) {
2644 if (nla_put_string(skb, TCA_KIND, ops->kind))
2645 goto nla_put_failure;
2646 if (ops->tmplt_dump(skb, net, priv) < 0)
2647 goto nla_put_failure;
2648 }
2649
2650 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2651 return skb->len;
2652
2653 out_nlmsg_trim:
2654 nla_put_failure:
2655 nlmsg_trim(skb, b);
2656 return -EMSGSIZE;
2657 }
2658
2659 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2660 u32 seq, u16 flags, int event, bool unicast)
2661 {
2662 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2663 struct tcf_block *block = chain->block;
2664 struct net *net = block->net;
2665 struct sk_buff *skb;
2666 int err = 0;
2667
2668 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2669 if (!skb)
2670 return -ENOBUFS;
2671
2672 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2673 chain->index, net, skb, block, portid,
2674 seq, flags, event) <= 0) {
2675 kfree_skb(skb);
2676 return -EINVAL;
2677 }
2678
2679 if (unicast)
2680 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2681 else
2682 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2683 flags & NLM_F_ECHO);
2684
2685 if (err > 0)
2686 err = 0;
2687 return err;
2688 }
2689
2690 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2691 void *tmplt_priv, u32 chain_index,
2692 struct tcf_block *block, struct sk_buff *oskb,
2693 u32 seq, u16 flags, bool unicast)
2694 {
2695 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2696 struct net *net = block->net;
2697 struct sk_buff *skb;
2698
2699 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2700 if (!skb)
2701 return -ENOBUFS;
2702
2703 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2704 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2705 kfree_skb(skb);
2706 return -EINVAL;
2707 }
2708
2709 if (unicast)
2710 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2711
2712 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2713 }
2714
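/* Chain templates bind a chain to a single classifier kind so that the
 * classifier can pre-validate filters added to it; only kinds that
 * implement tmplt_create/tmplt_destroy/tmplt_dump support this.
 */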
2715 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2716 struct nlattr **tca,
2717 struct netlink_ext_ack *extack)
2718 {
2719 const struct tcf_proto_ops *ops;
2720 char name[IFNAMSIZ];
2721 void *tmplt_priv;
2722
2723 /* If kind is not set, user did not specify template. */
2724 if (!tca[TCA_KIND])
2725 return 0;
2726
2727 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2728 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2729 return -EINVAL;
2730 }
2731
2732 ops = tcf_proto_lookup_ops(name, true, extack);
2733 if (IS_ERR(ops))
2734 return PTR_ERR(ops);
2735 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2736 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2737 module_put(ops->owner);
2738 return -EOPNOTSUPP;
2739 }
2740
2741 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2742 if (IS_ERR(tmplt_priv)) {
2743 module_put(ops->owner);
2744 return PTR_ERR(tmplt_priv);
2745 }
2746 chain->tmplt_ops = ops;
2747 chain->tmplt_priv = tmplt_priv;
2748 return 0;
2749 }
2750
2751 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2752 void *tmplt_priv)
2753 {
2754 /* If template ops are not set, there is no work to do. */
2755 if (!tmplt_ops)
2756 return;
2757
2758 tmplt_ops->tmplt_destroy(tmplt_priv);
2759 module_put(tmplt_ops->owner);
2760 }
2761
2762 /* Add/delete/get a chain */
2763
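/* A single handler serves RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN,
 * dispatching on nlmsg_type below. Only RTM_GETCHAIN is permitted
 * without CAP_NET_ADMIN.
 */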
2764 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2765 struct netlink_ext_ack *extack)
2766 {
2767 struct net *net = sock_net(skb->sk);
2768 struct nlattr *tca[TCA_MAX + 1];
2769 struct tcmsg *t;
2770 u32 parent;
2771 u32 chain_index;
2772 struct Qdisc *q;
2773 struct tcf_chain *chain;
2774 struct tcf_block *block;
2775 unsigned long cl;
2776 int err;
2777
2778 if (n->nlmsg_type != RTM_GETCHAIN &&
2779 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2780 return -EPERM;
2781
2782 replay:
2783 q = NULL;
2784 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2785 rtm_tca_policy, extack);
2786 if (err < 0)
2787 return err;
2788
2789 t = nlmsg_data(n);
2790 parent = t->tcm_parent;
2791 cl = 0;
2792
2793 block = tcf_block_find(net, &q, &parent, &cl,
2794 t->tcm_ifindex, t->tcm_block_index, extack);
2795 if (IS_ERR(block))
2796 return PTR_ERR(block);
2797
2798 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2799 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2800 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2801 err = -EINVAL;
2802 goto errout_block;
2803 }
2804
2805 mutex_lock(&block->lock);
2806 chain = tcf_chain_lookup(block, chain_index);
2807 if (n->nlmsg_type == RTM_NEWCHAIN) {
2808 if (chain) {
2809 if (tcf_chain_held_by_acts_only(chain)) {
2810 /* The chain exists only because there is
2811 * some action referencing it.
2812 */
2813 tcf_chain_hold(chain);
2814 } else {
2815 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2816 err = -EEXIST;
2817 goto errout_block_locked;
2818 }
2819 } else {
2820 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2821 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2822 err = -ENOENT;
2823 goto errout_block_locked;
2824 }
2825 chain = tcf_chain_create(block, chain_index);
2826 if (!chain) {
2827 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2828 err = -ENOMEM;
2829 goto errout_block_locked;
2830 }
2831 }
2832 } else {
2833 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2834 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2835 err = -EINVAL;
2836 goto errout_block_locked;
2837 }
2838 tcf_chain_hold(chain);
2839 }
2840
2841 if (n->nlmsg_type == RTM_NEWCHAIN) {
2842 /* Modifying a chain requires holding the parent block lock. In
2843 * case the chain was successfully added, take a reference to the
2844 * chain. This ensures that an empty chain does not disappear at
2845 * the end of this function.
2846 */
2847 tcf_chain_hold(chain);
2848 chain->explicitly_created = true;
2849 }
2850 mutex_unlock(&block->lock);
2851
2852 switch (n->nlmsg_type) {
2853 case RTM_NEWCHAIN:
2854 err = tc_chain_tmplt_add(chain, net, tca, extack);
2855 if (err) {
2856 tcf_chain_put_explicitly_created(chain);
2857 goto errout;
2858 }
2859
2860 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2861 RTM_NEWCHAIN, false);
2862 break;
2863 case RTM_DELCHAIN:
2864 tfilter_notify_chain(net, skb, block, q, parent, n,
2865 chain, RTM_DELTFILTER, true);
2866 /* Flush the chain first as the user requested chain removal. */
2867 tcf_chain_flush(chain, true);
2868 /* In case the chain was successfully deleted, put a reference
2869 * to the chain previously taken during addition.
2870 */
2871 tcf_chain_put_explicitly_created(chain);
2872 break;
2873 case RTM_GETCHAIN:
2874 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2875 n->nlmsg_flags, n->nlmsg_type, true);
2876 if (err < 0)
2877 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2878 break;
2879 default:
2880 err = -EOPNOTSUPP;
2881 NL_SET_ERR_MSG(extack, "Unsupported message type");
2882 goto errout;
2883 }
2884
2885 errout:
2886 tcf_chain_put(chain);
2887 errout_block:
2888 tcf_block_release(q, block, true);
2889 if (err == -EAGAIN)
2890 /* Replay the request. */
2891 goto replay;
2892 return err;
2893
2894 errout_block_locked:
2895 mutex_unlock(&block->lock);
2896 goto errout_block;
2897 }
2898
2899 /* called with RTNL */
2900 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2901 {
2902 struct net *net = sock_net(skb->sk);
2903 struct nlattr *tca[TCA_MAX + 1];
2904 struct Qdisc *q = NULL;
2905 struct tcf_block *block;
2906 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2907 struct tcf_chain *chain;
2908 long index_start;
2909 long index;
2910 u32 parent;
2911 int err;
2912
2913 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2914 return skb->len;
2915
2916 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2917 rtm_tca_policy, cb->extack);
2918 if (err)
2919 return err;
2920
2921 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2922 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2923 if (!block)
2924 goto out;
2925 /* If we work with a block index, q is NULL and the parent value
2926 * will never be used in the following code. The check
2927 * in tcf_fill_node prevents it. However, the compiler cannot
2928 * see that far, so set parent to zero to silence the warning
2929 * about parent being used uninitialized.
2930 */
2931 parent = 0;
2932 } else {
2933 const struct Qdisc_class_ops *cops;
2934 struct net_device *dev;
2935 unsigned long cl = 0;
2936
2937 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2938 if (!dev)
2939 return skb->len;
2940
2941 parent = tcm->tcm_parent;
2942 if (!parent) {
2943 q = rtnl_dereference(dev->qdisc);
2944 parent = q->handle;
2945 } else {
2946 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2947 }
2948 if (!q)
2949 goto out;
2950 cops = q->ops->cl_ops;
2951 if (!cops)
2952 goto out;
2953 if (!cops->tcf_block)
2954 goto out;
2955 if (TC_H_MIN(tcm->tcm_parent)) {
2956 cl = cops->find(q, tcm->tcm_parent);
2957 if (cl == 0)
2958 goto out;
2959 }
2960 block = cops->tcf_block(q, cl, NULL);
2961 if (!block)
2962 goto out;
2963 if (tcf_block_shared(block))
2964 q = NULL;
2965 }
2966
2967 index_start = cb->args[0];
2968 index = 0;
2969
2970 mutex_lock(&block->lock);
2971 list_for_each_entry(chain, &block->chain_list, list) {
2972 if ((tca[TCA_CHAIN] &&
2973 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2974 continue;
2975 if (index < index_start) {
2976 index++;
2977 continue;
2978 }
2979 if (tcf_chain_held_by_acts_only(chain))
2980 continue;
2981 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2982 chain->index, net, skb, block,
2983 NETLINK_CB(cb->skb).portid,
2984 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2985 RTM_NEWCHAIN);
2986 if (err <= 0)
2987 break;
2988 index++;
2989 }
2990 mutex_unlock(&block->lock);
2991
2992 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2993 tcf_block_refcnt_put(block, true);
2994 cb->args[0] = index;
2995
2996 out:
2997 /* If we made no progress, the error (EMSGSIZE) is real */
2998 if (skb->len == 0 && err)
2999 return err;
3000 return skb->len;
3001 }
3002
3003 void tcf_exts_destroy(struct tcf_exts *exts)
3004 {
3005 #ifdef CONFIG_NET_CLS_ACT
3006 if (exts->actions) {
3007 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3008 kfree(exts->actions);
3009 }
3010 exts->nr_actions = 0;
3011 #endif
3012 }
3013 EXPORT_SYMBOL(tcf_exts_destroy);
3014
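/* Called by classifiers from their ->change() callback to parse the
 * action (or legacy police) attributes into exts->actions.
 */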
3015 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3016 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3017 bool rtnl_held, struct netlink_ext_ack *extack)
3018 {
3019 #ifdef CONFIG_NET_CLS_ACT
3020 {
3021 struct tc_action *act;
3022 size_t attr_size = 0;
3023
3024 if (exts->police && tb[exts->police]) {
3025 act = tcf_action_init_1(net, tp, tb[exts->police],
3026 rate_tlv, "police", ovr,
3027 TCA_ACT_BIND, rtnl_held,
3028 extack);
3029 if (IS_ERR(act))
3030 return PTR_ERR(act);
3031
3032 act->type = exts->type = TCA_OLD_COMPAT;
3033 exts->actions[0] = act;
3034 exts->nr_actions = 1;
3035 tcf_idr_insert_many(exts->actions);
3036 } else if (exts->action && tb[exts->action]) {
3037 int err;
3038
3039 err = tcf_action_init(net, tp, tb[exts->action],
3040 rate_tlv, NULL, ovr, TCA_ACT_BIND,
3041 exts->actions, &attr_size,
3042 rtnl_held, extack);
3043 if (err < 0)
3044 return err;
3045 exts->nr_actions = err;
3046 }
3047 }
3048 #else
3049 if ((exts->action && tb[exts->action]) ||
3050 (exts->police && tb[exts->police])) {
3051 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3052 return -EOPNOTSUPP;
3053 }
3054 #endif
3055
3056 return 0;
3057 }
3058 EXPORT_SYMBOL(tcf_exts_validate);
3059
3060 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3061 {
3062 #ifdef CONFIG_NET_CLS_ACT
3063 struct tcf_exts old = *dst;
3064
3065 *dst = *src;
3066 tcf_exts_destroy(&old);
3067 #endif
3068 }
3069 EXPORT_SYMBOL(tcf_exts_change);
3070
3071 #ifdef CONFIG_NET_CLS_ACT
3072 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3073 {
3074 if (exts->nr_actions == 0)
3075 return NULL;
3076 else
3077 return exts->actions[0];
3078 }
3079 #endif
3080
3081 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3082 {
3083 #ifdef CONFIG_NET_CLS_ACT
3084 struct nlattr *nest;
3085
3086 if (exts->action && tcf_exts_has_actions(exts)) {
3087 /*
3088 * Again, for backward-compatible mode: we want to
3089 * work with both old and new modes of entering
3090 * tc data even if iproute2 is newer - jhs
3091 */
3092 if (exts->type != TCA_OLD_COMPAT) {
3093 nest = nla_nest_start_noflag(skb, exts->action);
3094 if (nest == NULL)
3095 goto nla_put_failure;
3096
3097 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3098 goto nla_put_failure;
3099 nla_nest_end(skb, nest);
3100 } else if (exts->police) {
3101 struct tc_action *act = tcf_exts_first_act(exts);
3102 nest = nla_nest_start_noflag(skb, exts->police);
3103 if (nest == NULL || !act)
3104 goto nla_put_failure;
3105 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3106 goto nla_put_failure;
3107 nla_nest_end(skb, nest);
3108 }
3109 }
3110 return 0;
3111
3112 nla_put_failure:
3113 nla_nest_cancel(skb, nest);
3114 return -1;
3115 #else
3116 return 0;
3117 #endif
3118 }
3119 EXPORT_SYMBOL(tcf_exts_dump);
3120
3121
3122 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3123 {
3124 #ifdef CONFIG_NET_CLS_ACT
3125 struct tc_action *a = tcf_exts_first_act(exts);
3126 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3127 return -1;
3128 #endif
3129 return 0;
3130 }
3131 EXPORT_SYMBOL(tcf_exts_dump_stats);
3132
3133 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3134 {
3135 if (*flags & TCA_CLS_FLAGS_IN_HW)
3136 return;
3137 *flags |= TCA_CLS_FLAGS_IN_HW;
3138 atomic_inc(&block->offloadcnt);
3139 }
3140
3141 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3142 {
3143 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3144 return;
3145 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3146 atomic_dec(&block->offloadcnt);
3147 }
3148
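/* in_hw_count tracks how many hardware callbacks accepted the filter;
 * the TCA_CLS_FLAGS_IN_HW flag and the block's offloadcnt mirror
 * whether that count is non-zero.
 */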
3149 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3150 struct tcf_proto *tp, u32 *cnt,
3151 u32 *flags, u32 diff, bool add)
3152 {
3153 lockdep_assert_held(&block->cb_lock);
3154
3155 spin_lock(&tp->lock);
3156 if (add) {
3157 if (!*cnt)
3158 tcf_block_offload_inc(block, flags);
3159 *cnt += diff;
3160 } else {
3161 *cnt -= diff;
3162 if (!*cnt)
3163 tcf_block_offload_dec(block, flags);
3164 }
3165 spin_unlock(&tp->lock);
3166 }
3167
3168 static void
3169 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3170 u32 *cnt, u32 *flags)
3171 {
3172 lockdep_assert_held(&block->cb_lock);
3173
3174 spin_lock(&tp->lock);
3175 tcf_block_offload_dec(block, flags);
3176 *cnt = 0;
3177 spin_unlock(&tp->lock);
3178 }
3179
3180 static int
3181 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3182 void *type_data, bool err_stop)
3183 {
3184 struct flow_block_cb *block_cb;
3185 int ok_count = 0;
3186 int err;
3187
3188 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3189 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3190 if (err) {
3191 if (err_stop)
3192 return err;
3193 } else {
3194 ok_count++;
3195 }
3196 }
3197 return ok_count;
3198 }
3199
3200 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3201 void *type_data, bool err_stop, bool rtnl_held)
3202 {
3203 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3204 int ok_count;
3205
3206 retry:
3207 if (take_rtnl)
3208 rtnl_lock();
3209 down_read(&block->cb_lock);
3210 /* Need to obtain the rtnl lock if the block is bound to devs that
3211 * require it. In the block bind code cb_lock is obtained while
3212 * holding rtnl, so we must obtain the locks in the same order here.
3213 */
3214 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3215 up_read(&block->cb_lock);
3216 take_rtnl = true;
3217 goto retry;
3218 }
3219
3220 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3221
3222 up_read(&block->cb_lock);
3223 if (take_rtnl)
3224 rtnl_unlock();
3225 return ok_count;
3226 }
3227 EXPORT_SYMBOL(tc_setup_cb_call);
3228
3229 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3230 * successfully offloaded, increment the block offload counter. On failure, a
3231 * previously offloaded filter is considered to be intact and the offload
3232 * counter is not decremented.
3233 */
3234
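/* A classifier typically invokes this as in the following sketch
 * (flower-style usage; cls_flower, skip_sw, f and rtnl_held are the
 * caller's state):
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count,
 *			      rtnl_held);
 */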
3235 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3236 enum tc_setup_type type, void *type_data, bool err_stop,
3237 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3238 {
3239 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3240 int ok_count;
3241
3242 retry:
3243 if (take_rtnl)
3244 rtnl_lock();
3245 down_read(&block->cb_lock);
3246 /* Need to obtain the rtnl lock if the block is bound to devs that
3247 * require it. In the block bind code cb_lock is obtained while
3248 * holding rtnl, so we must obtain the locks in the same order here.
3249 */
3250 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3251 up_read(&block->cb_lock);
3252 take_rtnl = true;
3253 goto retry;
3254 }
3255
3256 /* Make sure all netdevs sharing this block are offload-capable. */
3257 if (block->nooffloaddevcnt && err_stop) {
3258 ok_count = -EOPNOTSUPP;
3259 goto err_unlock;
3260 }
3261
3262 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3263 if (ok_count < 0)
3264 goto err_unlock;
3265
3266 if (tp->ops->hw_add)
3267 tp->ops->hw_add(tp, type_data);
3268 if (ok_count > 0)
3269 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3270 ok_count, true);
3271 err_unlock:
3272 up_read(&block->cb_lock);
3273 if (take_rtnl)
3274 rtnl_unlock();
3275 return ok_count < 0 ? ok_count : 0;
3276 }
3277 EXPORT_SYMBOL(tc_setup_cb_add);
3278
3279 /* Destructive filter replace. If a filter that wasn't already in hardware is
3280 * successfully offloaded, increment the block offload counter. On failure, the
3281 * previously offloaded filter is considered to be destroyed and the offload
3282 * counter is decremented.
3283 */
3284
3285 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3286 enum tc_setup_type type, void *type_data, bool err_stop,
3287 u32 *old_flags, unsigned int *old_in_hw_count,
3288 u32 *new_flags, unsigned int *new_in_hw_count,
3289 bool rtnl_held)
3290 {
3291 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3292 int ok_count;
3293
3294 retry:
3295 if (take_rtnl)
3296 rtnl_lock();
3297 down_read(&block->cb_lock);
3298 /* Need to obtain the rtnl lock if the block is bound to devs that
3299 * require it. In the block bind code cb_lock is obtained while
3300 * holding rtnl, so we must obtain the locks in the same order here.
3301 */
3302 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3303 up_read(&block->cb_lock);
3304 take_rtnl = true;
3305 goto retry;
3306 }
3307
3308 /* Make sure all netdevs sharing this block are offload-capable. */
3309 if (block->nooffloaddevcnt && err_stop) {
3310 ok_count = -EOPNOTSUPP;
3311 goto err_unlock;
3312 }
3313
3314 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3315 if (tp->ops->hw_del)
3316 tp->ops->hw_del(tp, type_data);
3317
3318 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3319 if (ok_count < 0)
3320 goto err_unlock;
3321
3322 if (tp->ops->hw_add)
3323 tp->ops->hw_add(tp, type_data);
3324 if (ok_count > 0)
3325 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3326 new_flags, ok_count, true);
3327 err_unlock:
3328 up_read(&block->cb_lock);
3329 if (take_rtnl)
3330 rtnl_unlock();
3331 return ok_count < 0 ? ok_count : 0;
3332 }
3333 EXPORT_SYMBOL(tc_setup_cb_replace);
3334
3335 /* Destroy the filter and decrement the block offload counter if the
3336 * filter was previously offloaded.
3337 */
3338
3339 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3340 enum tc_setup_type type, void *type_data, bool err_stop,
3341 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3342 {
3343 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3344 int ok_count;
3345
3346 retry:
3347 if (take_rtnl)
3348 rtnl_lock();
3349 down_read(&block->cb_lock);
3350 /* Need to obtain the rtnl lock if the block is bound to devs that
3351 * require it. In the block bind code cb_lock is obtained while
3352 * holding rtnl, so we must obtain the locks in the same order here.
3353 */
3354 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3355 up_read(&block->cb_lock);
3356 take_rtnl = true;
3357 goto retry;
3358 }
3359
3360 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3361
3362 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3363 if (tp->ops->hw_del)
3364 tp->ops->hw_del(tp, type_data);
3365
3366 up_read(&block->cb_lock);
3367 if (take_rtnl)
3368 rtnl_unlock();
3369 return ok_count < 0 ? ok_count : 0;
3370 }
3371 EXPORT_SYMBOL(tc_setup_cb_destroy);
3372
3373 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3374 bool add, flow_setup_cb_t *cb,
3375 enum tc_setup_type type, void *type_data,
3376 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3377 {
3378 int err = cb(type, type_data, cb_priv);
3379
3380 if (err) {
3381 if (add && tc_skip_sw(*flags))
3382 return err;
3383 } else {
3384 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3385 add);
3386 }
3387
3388 return 0;
3389 }
3390 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3391
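/* Release resources grabbed while translating actions: mirred device
 * references, copied tunnel metadata and psample group references are
 * all freed through the per-entry destructors set below.
 */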
3392 void tc_cleanup_flow_action(struct flow_action *flow_action)
3393 {
3394 struct flow_action_entry *entry;
3395 int i;
3396
3397 flow_action_for_each(i, entry, flow_action)
3398 if (entry->destructor)
3399 entry->destructor(entry->destructor_priv);
3400 }
3401 EXPORT_SYMBOL(tc_cleanup_flow_action);
3402
3403 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3404 const struct tc_action *act)
3405 {
3406 #ifdef CONFIG_NET_CLS_ACT
3407 entry->dev = act->ops->get_dev(act, &entry->destructor);
3408 if (!entry->dev)
3409 return;
3410 entry->destructor_priv = entry->dev;
3411 #endif
3412 }
3413
3414 static void tcf_tunnel_encap_put_tunnel(void *priv)
3415 {
3416 struct ip_tunnel_info *tunnel = priv;
3417
3418 kfree(tunnel);
3419 }
3420
3421 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3422 const struct tc_action *act)
3423 {
3424 entry->tunnel = tcf_tunnel_info_copy(act);
3425 if (!entry->tunnel)
3426 return -ENOMEM;
3427 entry->destructor = tcf_tunnel_encap_put_tunnel;
3428 entry->destructor_priv = entry->tunnel;
3429 return 0;
3430 }
3431
3432 static void tcf_sample_get_group(struct flow_action_entry *entry,
3433 const struct tc_action *act)
3434 {
3435 #ifdef CONFIG_NET_CLS_ACT
3436 entry->sample.psample_group =
3437 act->ops->get_psample_group(act, &entry->destructor);
3438 entry->destructor_priv = entry->sample.psample_group;
3439 #endif
3440 }
3441
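/* Translate tcf_exts actions into a flow_action table for drivers.
 * The output index j is kept separate from i because a single pedit
 * action expands into one entry per pedit key; j is advanced inside
 * the pedit loop and therefore skipped at the bottom of the loop.
 */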
3442 int tc_setup_flow_action(struct flow_action *flow_action,
3443 const struct tcf_exts *exts, bool rtnl_held)
3444 {
3445 struct tc_action *act;
3446 int i, j, k, err = 0;
3447
3448 if (!exts)
3449 return 0;
3450
3451 if (!rtnl_held)
3452 rtnl_lock();
3453
3454 j = 0;
3455 tcf_exts_for_each_action(i, act, exts) {
3456 struct flow_action_entry *entry;
3457
3458 entry = &flow_action->entries[j];
3459 spin_lock_bh(&act->tcfa_lock);
3460 if (is_tcf_gact_ok(act)) {
3461 entry->id = FLOW_ACTION_ACCEPT;
3462 } else if (is_tcf_gact_shot(act)) {
3463 entry->id = FLOW_ACTION_DROP;
3464 } else if (is_tcf_gact_trap(act)) {
3465 entry->id = FLOW_ACTION_TRAP;
3466 } else if (is_tcf_gact_goto_chain(act)) {
3467 entry->id = FLOW_ACTION_GOTO;
3468 entry->chain_index = tcf_gact_goto_chain_index(act);
3469 } else if (is_tcf_mirred_egress_redirect(act)) {
3470 entry->id = FLOW_ACTION_REDIRECT;
3471 tcf_mirred_get_dev(entry, act);
3472 } else if (is_tcf_mirred_egress_mirror(act)) {
3473 entry->id = FLOW_ACTION_MIRRED;
3474 tcf_mirred_get_dev(entry, act);
3475 } else if (is_tcf_mirred_ingress_redirect(act)) {
3476 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3477 tcf_mirred_get_dev(entry, act);
3478 } else if (is_tcf_mirred_ingress_mirror(act)) {
3479 entry->id = FLOW_ACTION_MIRRED_INGRESS;
3480 tcf_mirred_get_dev(entry, act);
3481 } else if (is_tcf_vlan(act)) {
3482 switch (tcf_vlan_action(act)) {
3483 case TCA_VLAN_ACT_PUSH:
3484 entry->id = FLOW_ACTION_VLAN_PUSH;
3485 entry->vlan.vid = tcf_vlan_push_vid(act);
3486 entry->vlan.proto = tcf_vlan_push_proto(act);
3487 entry->vlan.prio = tcf_vlan_push_prio(act);
3488 break;
3489 case TCA_VLAN_ACT_POP:
3490 entry->id = FLOW_ACTION_VLAN_POP;
3491 break;
3492 case TCA_VLAN_ACT_MODIFY:
3493 entry->id = FLOW_ACTION_VLAN_MANGLE;
3494 entry->vlan.vid = tcf_vlan_push_vid(act);
3495 entry->vlan.proto = tcf_vlan_push_proto(act);
3496 entry->vlan.prio = tcf_vlan_push_prio(act);
3497 break;
3498 default:
3499 err = -EOPNOTSUPP;
3500 goto err_out_locked;
3501 }
3502 } else if (is_tcf_tunnel_set(act)) {
3503 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3504 err = tcf_tunnel_encap_get_tunnel(entry, act);
3505 if (err)
3506 goto err_out_locked;
3507 } else if (is_tcf_tunnel_release(act)) {
3508 entry->id = FLOW_ACTION_TUNNEL_DECAP;
3509 } else if (is_tcf_pedit(act)) {
3510 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3511 switch (tcf_pedit_cmd(act, k)) {
3512 case TCA_PEDIT_KEY_EX_CMD_SET:
3513 entry->id = FLOW_ACTION_MANGLE;
3514 break;
3515 case TCA_PEDIT_KEY_EX_CMD_ADD:
3516 entry->id = FLOW_ACTION_ADD;
3517 break;
3518 default:
3519 err = -EOPNOTSUPP;
3520 goto err_out_locked;
3521 }
3522 entry->mangle.htype = tcf_pedit_htype(act, k);
3523 entry->mangle.mask = tcf_pedit_mask(act, k);
3524 entry->mangle.val = tcf_pedit_val(act, k);
3525 entry->mangle.offset = tcf_pedit_offset(act, k);
3526 entry = &flow_action->entries[++j];
3527 }
3528 } else if (is_tcf_csum(act)) {
3529 entry->id = FLOW_ACTION_CSUM;
3530 entry->csum_flags = tcf_csum_update_flags(act);
3531 } else if (is_tcf_skbedit_mark(act)) {
3532 entry->id = FLOW_ACTION_MARK;
3533 entry->mark = tcf_skbedit_mark(act);
3534 } else if (is_tcf_sample(act)) {
3535 entry->id = FLOW_ACTION_SAMPLE;
3536 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3537 entry->sample.truncate = tcf_sample_truncate(act);
3538 entry->sample.rate = tcf_sample_rate(act);
3539 tcf_sample_get_group(entry, act);
3540 } else if (is_tcf_police(act)) {
3541 entry->id = FLOW_ACTION_POLICE;
3542 entry->police.burst = tcf_police_tcfp_burst(act);
3543 entry->police.rate_bytes_ps =
3544 tcf_police_rate_bytes_ps(act);
3545 } else if (is_tcf_ct(act)) {
3546 entry->id = FLOW_ACTION_CT;
3547 entry->ct.action = tcf_ct_action(act);
3548 entry->ct.zone = tcf_ct_zone(act);
3549 } else if (is_tcf_mpls(act)) {
3550 switch (tcf_mpls_action(act)) {
3551 case TCA_MPLS_ACT_PUSH:
3552 entry->id = FLOW_ACTION_MPLS_PUSH;
3553 entry->mpls_push.proto = tcf_mpls_proto(act);
3554 entry->mpls_push.label = tcf_mpls_label(act);
3555 entry->mpls_push.tc = tcf_mpls_tc(act);
3556 entry->mpls_push.bos = tcf_mpls_bos(act);
3557 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3558 break;
3559 case TCA_MPLS_ACT_POP:
3560 entry->id = FLOW_ACTION_MPLS_POP;
3561 entry->mpls_pop.proto = tcf_mpls_proto(act);
3562 break;
3563 case TCA_MPLS_ACT_MODIFY:
3564 entry->id = FLOW_ACTION_MPLS_MANGLE;
3565 entry->mpls_mangle.label = tcf_mpls_label(act);
3566 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3567 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3568 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3569 break;
3570 default:
3571 err = -EOPNOTSUPP;
3572 goto err_out_locked;
3573 }
3574 } else if (is_tcf_skbedit_ptype(act)) {
3575 entry->id = FLOW_ACTION_PTYPE;
3576 entry->ptype = tcf_skbedit_ptype(act);
3577 } else {
3578 err = -EOPNOTSUPP;
3579 goto err_out_locked;
3580 }
3581 spin_unlock_bh(&act->tcfa_lock);
3582
3583 if (!is_tcf_pedit(act))
3584 j++;
3585 }
3586
3587 err_out:
3588 if (!rtnl_held)
3589 rtnl_unlock();
3590
3591 if (err)
3592 tc_cleanup_flow_action(flow_action);
3593
3594 return err;
3595 err_out_locked:
3596 spin_unlock_bh(&act->tcfa_lock);
3597 goto err_out;
3598 }
3599 EXPORT_SYMBOL(tc_setup_flow_action);
3600
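/* Size the flow_action table for tc_setup_flow_action(): pedit counts
 * once per key to match the expansion performed during translation.
 */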
3601 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3602 {
3603 unsigned int num_acts = 0;
3604 struct tc_action *act;
3605 int i;
3606
3607 tcf_exts_for_each_action(i, act, exts) {
3608 if (is_tcf_pedit(act))
3609 num_acts += tcf_pedit_nkeys(act);
3610 else
3611 num_acts++;
3612 }
3613 return num_acts;
3614 }
3615 EXPORT_SYMBOL(tcf_exts_num_actions);
3616
3617 static __net_init int tcf_net_init(struct net *net)
3618 {
3619 struct tcf_net *tn = net_generic(net, tcf_net_id);
3620
3621 spin_lock_init(&tn->idr_lock);
3622 idr_init(&tn->idr);
3623 return 0;
3624 }
3625
3626 static void __net_exit tcf_net_exit(struct net *net)
3627 {
3628 struct tcf_net *tn = net_generic(net, tcf_net_id);
3629
3630 idr_destroy(&tn->idr);
3631 }
3632
3633 static struct pernet_operations tcf_net_ops = {
3634 .init = tcf_net_init,
3635 .exit = tcf_net_exit,
3636 .id = &tcf_net_id,
3637 .size = sizeof(struct tcf_net),
3638 };
3639
3640 static struct flow_indr_block_entry block_entry = {
3641 .cb = tc_indr_block_get_and_cmd,
3642 .list = LIST_HEAD_INIT(block_entry.list),
3643 };
3644
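/* The tfilter handlers are registered with RTNL_FLAG_DOIT_UNLOCKED, so
 * the netlink core does not take rtnl for them; they take it
 * themselves only in the cases spelled out above. The chain handlers
 * are registered without the flag and still run under rtnl.
 */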
3645 static int __init tc_filter_init(void)
3646 {
3647 int err;
3648
3649 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3650 if (!tc_filter_wq)
3651 return -ENOMEM;
3652
3653 err = register_pernet_subsys(&tcf_net_ops);
3654 if (err)
3655 goto err_register_pernet_subsys;
3656
3657 flow_indr_add_block_cb(&block_entry);
3658
3659 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3660 RTNL_FLAG_DOIT_UNLOCKED);
3661 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3662 RTNL_FLAG_DOIT_UNLOCKED);
3663 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3664 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3665 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3666 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3667 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3668 tc_dump_chain, 0);
3669
3670 return 0;
3671
3672 err_register_pernet_subsys:
3673 destroy_workqueue(tc_filter_wq);
3674 return err;
3675 }
3676
3677 subsys_initcall(tc_filter_init);
3678