1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * net/sched/act_sample.c - Packet sampling tc action
4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
5 */
6
7 #include <linux/types.h>
8 #include <linux/kernel.h>
9 #include <linux/string.h>
10 #include <linux/errno.h>
11 #include <linux/skbuff.h>
12 #include <linux/rtnetlink.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/gfp.h>
16 #include <net/net_namespace.h>
17 #include <net/netlink.h>
18 #include <net/pkt_sched.h>
19 #include <linux/tc_act/tc_sample.h>
20 #include <net/tc_act/tc_sample.h>
21 #include <net/psample.h>
22 #include <net/pkt_cls.h>
23
24 #include <linux/if_arp.h>
25
26 static unsigned int sample_net_id;
27 static struct tc_action_ops act_sample_ops;
28
/* Netlink attribute policy: PARMS carries struct tc_sample; RATE,
 * TRUNC_SIZE and PSAMPLE_GROUP are plain u32 values.
 */
static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
	[TCA_SAMPLE_PARMS] = { .len = sizeof(struct tc_sample) },
	[TCA_SAMPLE_RATE] = { .type = NLA_U32 },
	[TCA_SAMPLE_TRUNC_SIZE] = { .type = NLA_U32 },
	[TCA_SAMPLE_PSAMPLE_GROUP] = { .type = NLA_U32 },
};
35
/* Parse netlink attributes and create or update a sample action.
 *
 * Returns ACT_P_CREATED when a new action instance was allocated,
 * 0 when an existing action was bound or replaced, or a negative
 * errno on failure.
 */
static int tcf_sample_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a, int ovr,
			   int bind, bool rtnl_held, struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);
	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
	struct psample_group *psample_group;
	u32 psample_group_num, rate, index;
	struct tcf_chain *goto_ch = NULL;
	struct tc_sample *parm;
	struct tcf_sample *s;
	bool exists = false;
	int ret, err;

	if (!nla)
		return -EINVAL;
	ret = nla_parse_nested_deprecated(tb, TCA_SAMPLE_MAX, nla,
					  sample_policy, NULL);
	if (ret < 0)
		return ret;
	/* PARMS, RATE and PSAMPLE_GROUP are mandatory; TRUNC_SIZE is not. */
	if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
	    !tb[TCA_SAMPLE_PSAMPLE_GROUP])
		return -EINVAL;

	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
	index = parm->index;
	/* Look up an existing action by index, or reserve the index. */
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_sample_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		/* Action exists and caller did not request replacement. */
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
	if (!rate) {
		/* A zero rate would divide by zero in tcf_sample_act(). */
		NL_SET_ERR_MSG(extack, "invalid sample rate");
		err = -EINVAL;
		goto put_chain;
	}
	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
	/* Get (or create) the psample group; this takes a reference. */
	psample_group = psample_group_get(net, psample_group_num);
	if (!psample_group) {
		err = -ENOMEM;
		goto put_chain;
	}

	s = to_sample(*a);

	spin_lock_bh(&s->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	s->rate = rate;
	s->psample_group_num = psample_group_num;
	/* Swap in the new group; psample_group now holds the old one. */
	rcu_swap_protected(s->psample_group, psample_group,
			   lockdep_is_held(&s->tcf_lock));

	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
		s->truncate = true;
		s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
	}
	spin_unlock_bh(&s->tcf_lock);

	/* Drop references to the replaced group and goto chain, if any,
	 * outside the spinlock.
	 */
	if (psample_group)
		psample_group_put(psample_group);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
129
tcf_sample_cleanup(struct tc_action * a)130 static void tcf_sample_cleanup(struct tc_action *a)
131 {
132 struct tcf_sample *s = to_sample(a);
133 struct psample_group *psample_group;
134
135 /* last reference to action, no need to lock */
136 psample_group = rcu_dereference_protected(s->psample_group, 1);
137 RCU_INIT_POINTER(s->psample_group, NULL);
138 if (psample_group)
139 psample_group_put(psample_group);
140 }
141
tcf_sample_dev_ok_push(struct net_device * dev)142 static bool tcf_sample_dev_ok_push(struct net_device *dev)
143 {
144 switch (dev->type) {
145 case ARPHRD_TUNNEL:
146 case ARPHRD_TUNNEL6:
147 case ARPHRD_SIT:
148 case ARPHRD_IPGRE:
149 case ARPHRD_IP6GRE:
150 case ARPHRD_VOID:
151 case ARPHRD_NONE:
152 return false;
153 default:
154 return true;
155 }
156 }
157
/* Per-packet action handler: sample the packet to the psample group
 * with probability 1/rate, then return the configured control action.
 * Runs in the datapath under RCU-bh protection.
 */
static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *psample_group;
	int retval;
	int size;
	int iif;
	int oif;

	tcf_lastuse_update(&s->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
	retval = READ_ONCE(s->tcf_action);

	psample_group = rcu_dereference_bh(s->psample_group);

	/* randomly sample packets according to rate */
	if (psample_group && (prandom_u32() % s->rate == 0)) {
		/* Report input/output ifindexes depending on direction. */
		if (!skb_at_tc_ingress(skb)) {
			iif = skb->skb_iif;
			oif = skb->dev->ifindex;
		} else {
			iif = skb->dev->ifindex;
			oif = 0;
		}

		/* on ingress, the mac header gets popped, so push it back */
		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_push(skb, skb->mac_len);

		size = s->truncate ? s->trunc_size : skb->len;
		psample_sample_packet(psample_group, skb, size, iif, oif,
				      s->rate);

		/* Undo the push so later actions see the skb unchanged. */
		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_pull(skb, skb->mac_len);
	}

	return retval;
}
198
/* Dump the action's configuration to a netlink message.
 *
 * Returns the message length on success or -1 on failure; on failure
 * the partially-written attributes are trimmed back to the saved tail.
 */
static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	/* Saved tail pointer so a failed dump can be rolled back. */
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_sample *s = to_sample(a);
	struct tc_sample opt = {
		.index = s->tcf_index,
		.refcnt = refcount_read(&s->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&s->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	/* Lock so action/rate/group fields are read consistently. */
	spin_lock_bh(&s->tcf_lock);
	opt.action = s->tcf_action;
	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &s->tcf_tm);
	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
		goto nla_put_failure;

	/* TRUNC_SIZE is only emitted when truncation is configured. */
	if (s->truncate)
		if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
			goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
		goto nla_put_failure;
	spin_unlock_bh(&s->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&s->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
238
tcf_sample_walker(struct net * net,struct sk_buff * skb,struct netlink_callback * cb,int type,const struct tc_action_ops * ops,struct netlink_ext_ack * extack)239 static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
240 struct netlink_callback *cb, int type,
241 const struct tc_action_ops *ops,
242 struct netlink_ext_ack *extack)
243 {
244 struct tc_action_net *tn = net_generic(net, sample_net_id);
245
246 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
247 }
248
/* Look up a sample action by index in this netns. */
static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
{
	return tcf_idr_search(net_generic(net, sample_net_id), a, index);
}
255
/* tc_action_priv_destructor adapter around psample_group_put(). */
static void tcf_psample_group_put(void *priv)
{
	psample_group_put((struct psample_group *)priv);
}
262
/* Hand out the action's psample group with an extra reference taken
 * under the action lock; the caller releases it via *destructor.
 * Returns NULL when no group is currently configured.
 */
static struct psample_group *
tcf_sample_get_group(const struct tc_action *a,
		     tc_action_priv_destructor *destructor)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *group;

	spin_lock_bh(&s->tcf_lock);
	group = rcu_dereference_protected(s->psample_group,
					  lockdep_is_held(&s->tcf_lock));
	if (group) {
		/* Reference pinned for the caller; matching put is set
		 * as the destructor.
		 */
		psample_group_take(group);
		*destructor = tcf_psample_group_put;
	}
	spin_unlock_bh(&s->tcf_lock);

	return group;
}
281
/* Operations table registered with the tc action core. */
static struct tc_action_ops act_sample_ops = {
	.kind	  = "sample",
	.id	  = TCA_ID_SAMPLE,
	.owner	  = THIS_MODULE,
	.act	  = tcf_sample_act,
	.dump	  = tcf_sample_dump,
	.init	  = tcf_sample_init,
	.cleanup  = tcf_sample_cleanup,
	.walk	  = tcf_sample_walker,
	.lookup	  = tcf_sample_search,
	.get_psample_group = tcf_sample_get_group,
	.size	  = sizeof(struct tcf_sample),
};
295
sample_init_net(struct net * net)296 static __net_init int sample_init_net(struct net *net)
297 {
298 struct tc_action_net *tn = net_generic(net, sample_net_id);
299
300 return tc_action_net_init(net, tn, &act_sample_ops);
301 }
302
/* Per-netns batched exit: tear down the action tables. */
static void __net_exit sample_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, sample_net_id);
}
307
/* Per-network-namespace registration for the sample action. */
static struct pernet_operations sample_net_ops = {
	.init = sample_init_net,
	.exit_batch = sample_exit_net,
	.id   = &sample_net_id,
	.size = sizeof(struct tc_action_net),
};
314
/* Module load: register the action ops and pernet hooks. */
static int __init sample_init_module(void)
{
	return tcf_register_action(&act_sample_ops, &sample_net_ops);
}
319
/* Module unload: unregister the action ops and pernet hooks. */
static void __exit sample_cleanup_module(void)
{
	tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}
324
/* Standard module registration boilerplate. */
module_init(sample_init_module);
module_exit(sample_cleanup_module);

MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
MODULE_DESCRIPTION("Packet sampling action");
MODULE_LICENSE("GPL v2");
331