// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves are ALWAYS at level 0 and root
    classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
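
/*
 * Illustrative only: a minimal tc configuration building the kind of
 * hierarchy described above (device name and rates are arbitrary
 * examples, not part of this file):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * 1:10 and 1:20 are level-0 leaves that may borrow from their parent
 * 1:1 up to their ceil; 1:1 is an interior node one level up.
 */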

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param    (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When a class changes from state 1->2 and disconnects from its
	 * parent's feed, we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;
	int			filter_cnt;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in skb->priority. Then we
 * examine filters in qdisc and in inner nodes (if higher filter points to
 * the inner node). If we end up with classid MAJOR:0 we enqueue the skb
 * into special internal fifo (direct). These packets then go directly thru.
 * If we still have no valid leaf we try to use MAJOR:default leaf. If still
 * unsuccessful, we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
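
/*
 * Illustrative only: with the example hierarchy above, a socket that
 * sets SO_PRIORITY to 0x10010 (classid 1:10) is classified into that
 * leaf directly, while 0x10000 (1:0, the qdisc handle itself) selects
 * the direct queue; anything unmatched falls back to the 1:20 default.
 */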

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that it will
 * change its mode at time cl->pq_key (in nanoseconds). Make sure that the
 * class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
						 struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it participates in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			unsigned int prio = ffz(~m);

			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
				break;
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as the parent is ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
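
/*
 * Illustrative only: the effect of htb_hysteresis on the thresholds
 * above. Without hysteresis both watermarks are 0, so a class changes
 * mode exactly when its (c)tokens cross zero. With hysteresis, a class
 * in HTB_CAN_SEND keeps sending until tokens fall below -buffer, and a
 * class only enters HTB_CANT_SEND once ctokens fall below -cbuffer,
 * trading shaping accuracy for fewer mode transitions.
 */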

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words, it can't be called
 * with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
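
/*
 * Token arithmetic note: both buckets above are kept in nanoseconds of
 * transmission time. psched_l2t_ns() converts the packet's byte count
 * into the time the configured rate (or ceil) needs to send it, so a
 * positive balance means the class is within its allowance.
 */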

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Only events with cl->pq_key <= q->now are applied.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	qdisc_skb_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
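
/*
 * Illustrative only: iproute2's "tc qdisc add dev eth0 root handle 1:
 * htb r2q 10 default 20" (device name is an arbitrary example) arrives
 * here as TCA_HTB_INIT with gopt->rate2quantum == 10 and, since tc
 * parses the default minor id as hex, gopt->defcls == 0x20.
 */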

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	sch->qstats.overlimits = q->overlimits;
	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->leaf.q)
		tcm->tcm_info = cl->leaf.q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
		.overlimits = cl->overlimits,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->leaf.q)
		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);

	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid, extack)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->inner, 0, sizeof(parent->inner));
	parent->leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->leaf.q);
		qdisc_put(cl->leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without an Oops).
	 */
	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	 * tc subsys guarantee us that in htb_destroy it holds no class
	 * refs so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid,
					  NULL);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level)
		qdisc_purge_queue(cl->leaf.q);

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct Qdisc *parent_qdisc = NULL;
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;
	int warn = 0;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keeping backward compatible with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
					      NULL));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
					      NULL));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				tcf_block_put(cl->block);
				kfree(cl);
				goto failure;
			}
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		 * which can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* there used to be a nasty bug here, we have to check that the node
	 * is really a leaf before changing cl->leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}
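
	/*
	 * Illustrative only: with the default r2q of 10, a leaf with rate
	 * 1mbit (125000 bytes/s) gets quantum = 125000 / 10 = 12500 bytes
	 * per DRR round; rates below 80kbit would compute less than 1000
	 * and be clamped (with the warning below) unless an explicit
	 * quantum is given.
	 */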

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
			    cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}

static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow for it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.find		=	htb_search,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_block	=	htb_tcf_block,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");