// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/flow_offload.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

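/* Transmit helper for actions that send packets themselves. When an
 * action has enabled the tcf_frag_xmit_count static key (used by the
 * IP defrag handling of conntrack-style actions), transmission is
 * diverted through sch_frag_xmit_hook() so oversized packets can be
 * re-fragmented; otherwise the supplied xmit callback is used directly.
 */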
int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

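/* Validate the control action (verdict) of an action being installed.
 * Plain verdicts must not exceed TC_ACT_VALUE_MAX and extended opcodes
 * must be within TC_ACT_EXT_OPCODE_MAX. For TC_ACT_GOTO_CHAIN the
 * target chain index is encoded in the low bits of the verdict, e.g.
 * "goto chain 42" arrives as (TC_ACT_GOTO_CHAIN | 42); a reference to
 * that chain is taken with tcf_chain_get_by_act() and handed back
 * through @newchain.
 */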
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

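/* Install the validated control action and the optional goto_chain
 * reference on the action, returning the previous goto_chain pointer
 * (if any) so the caller can release it.
 */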
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void offload_action_hw_count_set(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = hw_count;
}

static void offload_action_hw_count_inc(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count += hw_count;
}

static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = act->in_hw_count > hw_count ?
			   act->in_hw_count - hw_count : 0;
}

static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
	if (is_tcf_pedit(act))
		return tcf_pedit_nkeys(act);
	else
		return 1;
}

static bool tc_act_skip_hw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
}

static bool tc_act_skip_sw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}

static bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

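/* Validity check used below: after masking to the two skip bits,
 * XOR-ing against both bits yields zero (invalid) only when SKIP_HW
 * and SKIP_SW are both set; any other combination is accepted.
 */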
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
}

static int offload_action_init(struct flow_offload_action *fl_action,
			       struct tc_action *act,
			       enum offload_act_command cmd,
			       struct netlink_ext_ack *extack)
{
	int err;

	fl_action->extack = extack;
	fl_action->command = cmd;
	fl_action->index = act->tcfa_index;

	if (act->ops->offload_act_setup) {
		spin_lock_bh(&act->tcfa_lock);
		err = act->ops->offload_act_setup(act, fl_action, NULL,
						  false, extack);
		spin_unlock_bh(&act->tcfa_lock);
		return err;
	}

	return -EOPNOTSUPP;
}

static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
				     u32 *hw_count)
{
	int err;

	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
					  fl_act, NULL, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err;

	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						 cb, cb_priv) :
		    tcf_action_offload_cmd_ex(fl_act, hw_count);
}

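/* Offload a single action to hardware. With a driver callback (@cb) the
 * request goes to that one driver and in_hw_count is incremented by the
 * result; without one it is broadcast to all registered drivers and
 * in_hw_count is set to the number that accepted it. A skip_sw action
 * that ends up in no hardware at all fails with -EINVAL.
 */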
static int tcf_action_offload_add_ex(struct tc_action *action,
				     struct netlink_ext_ack *extack,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct flow_offload_action *fl_action;
	u32 in_hw_count = 0;
	int num, err = 0;

	if (tc_act_skip_hw(action->tcfa_flags))
		return 0;

	num = tcf_offload_act_num_actions_single(action);
	fl_action = offload_action_alloc(num);
	if (!fl_action)
		return -ENOMEM;

	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
	if (err)
		goto fl_err;

	err = tc_setup_action(&fl_action->action, actions, extack);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto fl_err;
	}

	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
	if (!err)
		cb ? offload_action_hw_count_inc(action, in_hw_count) :
		     offload_action_hw_count_set(action, in_hw_count);

	if (skip_sw && !tc_act_in_hw(action))
		err = -EINVAL;

	tc_cleanup_offload_action(&fl_action->action);

fl_err:
	kfree(fl_action);

	return err;
}

/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
				  struct netlink_ext_ack *extack)
{
	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}

int tcf_action_update_hw_stats(struct tc_action *action)
{
	struct flow_offload_action fl_act = {};
	int err;

	if (!tc_act_in_hw(action))
		return -EOPNOTSUPP;

	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
	if (!err) {
		preempt_disable();
		tcf_action_stats_update(action, fl_act.stats.bytes,
					fl_act.stats.pkts,
					fl_act.stats.drops,
					fl_act.stats.lastused,
					true);
		preempt_enable();
		action->used_hw_stats = fl_act.stats.used_hw_stats;
		action->used_hw_stats_valid = true;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);

static int tcf_action_offload_del_ex(struct tc_action *action,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	struct flow_offload_action fl_act = {};
	u32 in_hw_count = 0;
	int err = 0;

	if (!tc_act_in_hw(action))
		return 0;

	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
	if (err < 0)
		return err;

	if (!cb && action->in_hw_count != in_hw_count)
		return -EINVAL;

	/* do not need to update hw state when deleting action */
	if (cb && in_hw_count)
		offload_action_hw_count_dec(action, in_hw_count);

	return 0;
}

static int tcf_action_offload_del(struct tc_action *action)
{
	return tcf_action_offload_del_ex(action, NULL, NULL);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	tcf_action_offload_del(p);
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and a zero bind count can
	 * exist is when it was also created via the act API (unbinding the
	 * last classifier destroys an action that was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed via the
	 * act API while a classifier concurrently binds to an action with the
	 * same id. This results either in creation of a new action (same
	 * behavior as before) or in reuse of the existing action if the
	 * concurrent process increments the reference count before the action
	 * is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

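/* Drop one reference (and one bind reference if @bind) to an action.
 * If that was the last reference, the action is removed from the idr,
 * cleaned up, the owning module reference is dropped and ACT_P_DELETED
 * is returned.
 */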
int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	ret = 0;
	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			break;
		n_i++;
	}
	mutex_unlock(&idrinfo->lock);
	if (ret < 0) {
		if (n_i)
			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		else
			goto nla_put_failure;
	}

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

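/* Common walker for RTM_DELACTION (flush) and RTM_GETACTION (dump)
 * over a per-netns action table; any other message type is a bug and
 * is rejected with -EINVAL.
 */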
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

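/* Look up an action by index in the per-netns table. On success a
 * reference is taken, the action is returned through @a and the
 * function returns true; otherwise it returns false.
 */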
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
				struct netlink_callback *cb, int type,
				const struct tc_action_ops *ops,
				struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->walk))
		return ops->walk(net, skb, cb, type, ops, extack);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int __tcf_idr_search(struct net *net,
			    const struct tc_action_ops *ops,
			    struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->lookup))
		return ops->lookup(net, a, index);

	return tcf_idr_search(tn, a, index);
}

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

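/* Allocate and initialize the common part of a new action at @index
 * (the index must already be reserved via tcf_idr_check_alloc()),
 * optionally with per-cpu counters and a rate estimator. On success
 * the caller holds one reference (plus a bind reference if @bind) and
 * must later publish the action with tcf_idr_insert_many().
 */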
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	gnet_stats_basic_sync_init(&p->tcfa_bstats);
	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, false, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1.
 * Otherwise insert a temporary error pointer (to prevent concurrent users
 * from inserting actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

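/* Release all actions of one kind on netns teardown. Every remaining
 * entry is expected to hold a single unbound reference at this point,
 * so each __tcf_idr_release() call should delete its action.
 */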
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Since the act ops id is stored in the pernet subsystem list, there is
 * no way to walk only the action subsystems, so we keep the tc action
 * pernet ops ids on a separate list for reoffload to walk.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);
struct tc_act_pernet_id {
	struct list_head list;
	unsigned int id;
};

static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;
	int ret = 0;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			ret = -EEXIST;
			goto err_out;
		}
	}

	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
	if (!id_ptr) {
		ret = -ENOMEM;
		goto err_out;
	}
	id_ptr->id = id;

	list_add_tail(&id_ptr->list, &act_pernet_id_list);

err_out:
	mutex_unlock(&act_id_mutex);
	return ret;
}

static void tcf_pernet_del_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			list_del(&id_ptr->list);
			kfree(id_ptr);
			break;
		}
	}
	mutex_unlock(&act_id_mutex);
}

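/* Register a new action kind. The mandatory ops (act, dump, init) are
 * checked first, and the pernet subsystem is registered before the ops
 * are added to act_base so tcf_action_init_1() can never observe a
 * partially initialized netns. A typical action module does (sketch,
 * names are illustrative):
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 */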
int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	if (ops->id) {
		ret = tcf_pernet_add_id_list(*ops->id);
		if (ret)
			goto err_id;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			ret = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;

err_out:
	write_unlock(&act_mod_lock);
	if (ops->id)
		tcf_pernet_del_id_list(*ops->id);
err_id:
	unregister_pernet_subsys(ops);
	return ret;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err) {
		unregister_pernet_subsys(ops);
		if (ops->id)
			tcf_pernet_del_id_list(*ops->id);
	}
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a jump count can be at most 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
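/* Run the array of actions on the skb until a verdict other than
 * TC_ACT_PIPE is returned. TC_ACT_REPEAT re-invokes the same action
 * (bounded by a TTL of 32), TC_ACT_JUMP skips the number of actions
 * encoded in the verdict's low bits, and TC_ACT_GOTO_CHAIN hands the
 * result over to another chain via tcf_action_goto_chain_exec().
 */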
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		if (tc_act_skip_sw(a->tcfa_flags))
			continue;

		repeat_ttl = 32;
repeat:
		ret = a->ops->act(skb, a, res);
		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			return TC_ACT_OK;
		}
		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skip those NULL's. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 flags;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
	if (flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       flags, flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which means all
	 * supported types are set.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
							TCA_ACT_FLAGS_SKIP_HW |
							TCA_ACT_FLAGS_SKIP_SW),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

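/* Publish a batch of newly created actions: replace the ERR_PTR(-EBUSY)
 * placeholders reserved by tcf_idr_check_alloc() with the real action
 * pointers, making the actions visible to concurrent lookups.
 */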
void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it is just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

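/* Resolve the tc_action_ops for the requested action kind, taking a
 * module reference. If the kind is unknown and CONFIG_MODULES is set,
 * RTNL is dropped to request_module("act_<kind>"); -EAGAIN is then
 * returned so the caller replays the request under the re-taken lock.
 */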
struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

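/* Create or update a single action from its netlink attributes. On the
 * legacy policer path the attributes go straight to ops->init();
 * otherwise the cookie, hardware stats type and user flags are parsed
 * here first. Returns the action or an ERR_PTR().
 */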
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS]) {
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
			if (!tc_act_flags_valid(userflags.value)) {
				err = -EINVAL;
				goto err_out;
			}
		}

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
	} else {
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
				extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!police)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static bool tc_act_bind(u32 flags)
{
	return !!(flags & TCA_ACT_FLAGS_BIND);
}

/* Returns the number of initialized actions or a negative error. */

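/* Load the ops for, and initialize, up to TCA_ACT_MAX_PRIO actions from
 * one nested attribute. When binding from a classifier, each action's
 * skip_sw/skip_hw flags must match the filter's; otherwise new actions
 * are offloaded here. All actions are committed to the idr together at
 * the end so a mid-batch failure can be unwound with tcf_action_destroy().
 */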
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size,
		    u32 flags, u32 fl_flags,
		    struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
					 extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
		if (tc_act_bind(flags)) {
			bool skip_sw = tc_skip_sw(fl_flags);
			bool skip_hw = tc_skip_hw(fl_flags);

			if (tc_act_bind(act->tcfa_flags))
				continue;
			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
				NL_SET_ERR_MSG(extack,
					       "Mismatch between action and filter offload flags");
				err = -EINVAL;
				goto err;
			}
		} else {
			err = tcf_action_offload_add(act, extack);
			if (tc_act_skip_sw(act->tcfa_flags) && err)
				goto err;
		}
	}

	/* We have to commit them all together, because if any error happened in
	 * between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

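/* Fold byte/packet/drop counters into the action's statistics, using
 * the per-cpu counters when they were allocated and the central
 * counters otherwise. With @hw set, the hardware byte/packet counters
 * are updated as well.
 */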
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
				       bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* update hw stats for this action */
	tcf_action_update_hw_stats(p);

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
				  &p->tcfa_bstats, false) < 0 ||
	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw, false) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref, struct netlink_ext_ack *extack)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1, NULL) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (__tcf_idr_search(net, ops, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* some idjot trying to flush unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
	size_t attr_size = tcf_action_fill_size(action);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	const struct tc_action_ops *ops = action->ops;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	ret = tcf_idr_release_unsafe(action);
	if (ret == ACT_P_DELETED) {
		module_put(ops->owner);
		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
	} else {
		kfree_skb(skb);
	}

	return ret;
}

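/* Replay hardware offload state for one driver callback across every
 * action in every netns: on @add, re-offload each action to the newly
 * registered device; on removal, drop the device's offload and, if a
 * skip_sw action is no longer in any hardware, delete it and notify
 * userspace.
 */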
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
			    void *cb_priv, bool add)
{
	struct tc_act_pernet_id *id_ptr;
	struct tcf_idrinfo *idrinfo;
	struct tc_action_net *tn;
	struct tc_action *p;
	unsigned int act_id;
	unsigned long tmp;
	unsigned long id;
	struct idr *idr;
	struct net *net;
	int ret;

	if (!cb)
		return -EINVAL;

	down_read(&net_rwsem);
	mutex_lock(&act_id_mutex);

	for_each_net(net) {
		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
			act_id = id_ptr->id;
			tn = net_generic(net, act_id);
			if (!tn)
				continue;
			idrinfo = tn->idrinfo;
			if (!idrinfo)
				continue;

			mutex_lock(&idrinfo->lock);
			idr = &idrinfo->action_idr;
			idr_for_each_entry_ul(idr, p, tmp, id) {
				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
					continue;
				if (add) {
					tcf_action_offload_add_ex(p, NULL, cb,
								  cb_priv);
					continue;
				}

				/* cb unregister to update hw count */
				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
				if (ret < 0)
					continue;
				if (tc_act_skip_sw(p->tcfa_flags) &&
				    !tc_act_in_hw(p))
					tcf_reoffload_del_notify(net, p);
			}
			mutex_unlock(&idrinfo->lock);
		}
	}
	mutex_unlock(&act_id_mutex);
	up_read(&net_rwsem);

	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

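/* Build an RTM_NEWACTION message for the freshly installed @actions
 * and send it to RTNLGRP_TC listeners.
 */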
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

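/* Create (or, with TCA_ACT_FLAGS_REPLACE, replace) the actions
 * described by @nla and notify userspace on success.
 */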
static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

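	/* tcf_action_init() returns -EAGAIN when it had to autoload an
	 * action module; retry a bounded number of times in that case.
	 */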
	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, 0, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* Only put actions that already existed; newly created ones
	 * keep the reference taken at init.
	 */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

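/* Policy for the top-level attributes of RTM_*ACTION requests. */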
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

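/* rtnetlink doit handler for RTM_NEWACTION, RTM_DELACTION and
 * RTM_GETACTION. Everything except GET requires CAP_NET_ADMIN.
 */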
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	u32 flags = 0;
	int ret = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We assume that any flag combination other than
		 * NLM_F_REPLACE means "create only if it does not exist".
		 * NLM_F_CREATE | NLM_F_EXCL would express that explicitly,
		 * but to avoid ambiguity (e.g. when flags is zero) we
		 * simply treat it as the default.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

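/* Dig the TCA_ACT_KIND attribute of the first action out of a dump
 * request, so the dump can be restricted to that action kind.
 */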
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

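/* rtnetlink dumpit handler for RTM_GETACTION: dump all actions of the
 * kind named in the request, honouring the TCA_ROOT_FLAGS dump flags
 * and the TCA_ROOT_TIME_DELTA "modified since" filter.
 */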
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA])
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else {
		nlmsg_trim(skb, b);
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

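/* Register the action netlink handlers at boot. */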
static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);