/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work that is common to all
   qdiscs, and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code.
   NET_XMIT_DROP 	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore
   NET_XMIT_POLICED	- dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue, but without removing the packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears
   all timers, counters (except for statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of
   the qdisc.

   ---change

   changes the qdisc parameters.
 */
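
/*
 * Illustrative sketch of the enqueue/dequeue contract described above
 * (kept compiled out; the sketch_* names are hypothetical -- see
 * sch_fifo.c for a real minimal qdisc):
 */
#if 0
static int sketch_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* qdisc_enqueue_tail() queues the skb and returns 0 (success) */
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);

	/* qdisc_drop() frees the skb and returns NET_XMIT_DROP */
	return qdisc_drop(skb, sch);
}

static struct sk_buff *sketch_dequeue(struct Qdisc *sch)
{
	/* NULL does not necessarily mean "empty"; q->q.qlen decides that */
	return qdisc_dequeue_head(sch);
}
#endif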

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
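
/*
 * A qdisc module typically registers its ops from its module init hook;
 * a minimal sketch (sketch_qdisc_ops is hypothetical):
 *
 *	static int __init sketch_module_init(void)
 *	{
 *		return register_qdisc(&sketch_qdisc_ops);
 *	}
 *
 *	static void __exit sketch_module_exit(void)
 *	{
 *		unregister_qdisc(&sketch_qdisc_ops);
 *	}
 *	module_init(sketch_module_init);
 *	module_exit(sketch_module_exit);
 */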

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (the root qdisc, all its children, children of children, etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find a queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* In older iproute2 versions the linklayer setting was not transferred
 * to the kernel, and the kernel-side rate table lookup system has since
 * been dropped. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value.  The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find low and high table entries for
 * mapping this cell.  If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing the two.
 */
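
/* Worked example (illustrative numbers): with mpu = 0 and cell_log = 3,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, so
 * cell_low = 0 and cell_high = (48 >> 3) - 1 = 5.  In an ATM-aligned
 * table, bytes 0..47 all cost one 53-byte cell, hence
 * rtab[0] == rtab[5] and TC_LINKLAYER_ATM is returned; in a plain
 * Ethernet table the two entries differ.
 */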
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
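
/* Worked example (illustrative numbers): with overhead = 24,
 * cell_align = -1, cell_log = 6, size_log = 0 and tsize = 512,
 * a 1000-byte skb maps to slot (1000 + 24 - 1) >> 6 = 15, so the
 * scheduler charges stab->data[15] bytes instead of skb->len; slots
 * beyond tsize are extrapolated from the tail of the table as above.
 */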

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (throttle)
		qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
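
/*
 * Typical watchdog usage from a rate-limiting qdisc (illustrative
 * sketch, kept compiled out; sketch_sched_data and t_next_ns are
 * hypothetical -- cf. sch_tbf.c for a real user).  The watchdog is set
 * up in ->init with qdisc_watchdog_init() and cancelled in
 * ->reset/->destroy with qdisc_watchdog_cancel().
 */
#if 0
static struct sk_buff *sketch_dequeue(struct Qdisc *sch)
{
	struct sketch_sched_data *q = qdisc_priv(sch);

	if (ktime_get_ns() < q->t_next_ns) {
		/* Not allowed to send yet: arm the hrtimer and stay
		 * throttled; qdisc_watchdog() unthrottles the qdisc and
		 * reschedules the queue when the timer fires.
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->t_next_ns, true);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif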

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
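
/*
 * Usage sketch for the class hash helpers (kept compiled out;
 * sketch_class and sketch_sched_data are hypothetical -- cf. sch_htb.c
 * for a real user).  A class embeds struct Qdisc_class_common; inserts
 * go through qdisc_class_hash_insert() followed by
 * qdisc_class_hash_grow(), lookups through qdisc_class_find().
 */
#if 0
struct sketch_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* per-class scheduling state would follow */
};

static struct sketch_class *sketch_find(struct Qdisc *sch, u32 classid)
{
	struct sketch_sched_data *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	return clc ? container_of(clc, struct sketch_class, common) : NULL;
}
#endif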

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}
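
/* A handle packs "major:minor" into a u32: TC_H_MAKE(maj, min) keeps the
 * upper 16 bits of maj and the lower 16 bits of min, and TC_H_MAJ()/
 * TC_H_MIN() extract them again.  For example, the first automatic handle
 * handed out above is 0x80010000, which tc(8) prints as "8001:".
 */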

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* The replay will call qdisc_lookup_ops()
				 * again, so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be the default qdisc; ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and we have a
				 *   choice: either to change it or to
				 *   create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if both CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor meant
				 *   that the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND that does not match the
				 *   existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

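/* Qdisc dumps are resumable: cb->args[0] holds the index of the device
 * where the previous dump stopped and cb->args[1] the qdisc index within
 * that device.  E.g. if the previous pass filled its skb at the third
 * qdisc of the second device, the next invocation skips straight back
 * to that position.
 */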
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/


static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle, consistent with
		 * both parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate the qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get the class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol, and asks
 * the specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);

		if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *otp = tp;
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
					       tp->q->ops->id,
					       tp->prio & 0xffff,
					       ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);