// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
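
/*
 * Example userspace usage (illustrative; device names are assumed):
 *
 *   # redirect all ingress traffic on eth0 to eth1
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: protocol all matchall \
 *           action mirred egress redirect dev eth1
 */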

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

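/*
 * Guard against mirred actions nesting into one another (e.g. a
 * redirect whose target device feeds packets back through another
 * mirred instance): each invocation on this CPU bumps the counter,
 * and packets are dropped once the limit is exceeded.
 */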
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

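/*
 * The TC core treats these return codes as "skb consumed", so for them
 * the original skb can be handed straight to the target path instead
 * of being cloned first.
 */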
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

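/* TCA_MIRRED_PARMS must carry a complete struct tc_mirred. */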
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

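/*
 * Netlink control path: validate the requested eaction, create a new
 * action instance or update an existing one, and (re)bind the target
 * device by ifindex, swapping the device reference under the action
 * lock.
 */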
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create(tn, index, est, a,
				     &act_mirred_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		rcu_swap_protected(m->tcfm_dev, dev,
				   lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

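/*
 * Fast-path handler invoked per packet. Clones the skb unless the
 * original can be reinserted directly (ingress redirect with a
 * consuming return code), fixes up the header offset for the target
 * path, and injects the packet at the target device's ingress or
 * egress as the eaction requests.
 */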
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			res->qstats = this_cpu_ptr(m->common.cpu_qstats);
			skb_tc_reinsert(skb, res);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	if (!want_ingress)
		err = dev_queue_xmit(skb2);
	else
		err = netif_receive_skb(skb2);

	if (err) {
out:
		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

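/*
 * Stats update callback used by hardware offload: fold reported
 * counters into the per-CPU software stats, and into the dedicated
 * hardware stats when the update comes from hardware.
 */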
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
	if (hw)
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
				   bytes, packets);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

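/* Serialize the action's parameters and timestamps into a netlink dump. */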
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

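/*
 * Netdevice notifier: when a target device is unregistered, drop the
 * reference held on it and clear tcfm_dev on every mirred instance
 * that points at it, so the datapath sees NULL rather than a stale
 * device.
 */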
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period is necessary here,
				 * as net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

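/*
 * Hand the target device to offload code: take a reference under RCU
 * and return the matching destructor so the caller can release it.
 */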
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

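/* Callbacks wiring mirred into the generic tc action infrastructure. */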
static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

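/* Per-network-namespace setup and teardown of the mirred action table. */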
static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

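/*
 * The netdevice notifier is registered before the action ops and is
 * rolled back if action registration fails.
 */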
static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);