// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on Linux by:
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
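
/*
 * Example usage (a sketch: the device name and parameter values below
 * are illustrative, not defaults mandated by this file):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 *	tc -s qdisc show dev eth0
 */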

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>


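/* Hard limit on queue length, in packets; arrivals beyond it are
 * tail-dropped in codel_qdisc_enqueue().
 */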
#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel; we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

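/* Drop callback passed to codel_dequeue(): free a packet the algorithm
 * decided to drop and account for it in the qdisc drop counter.
 */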
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

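/* Dequeue entry point: run the generic CoDel machinery, which may drop
 * one or more packets before handing one back. Any drops it performed
 * are then propagated to ancestor qdiscs via qdisc_tree_reduce_backlog().
 */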
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	if (q->stats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

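/* Enqueue entry point: timestamp the packet so its sojourn time can be
 * measured at dequeue, then queue it at the tail. When the queue is
 * already at sch->limit, the packet is dropped instead.
 */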
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};

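/* Apply a configuration change from netlink. New parameter values are
 * written under the tree lock; if the packet limit shrank, the queue is
 * trimmed down to the new limit and the parents are notified.
 */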
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

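	/* target, interval and ce_threshold arrive in usec and are stored
	 * as codel_time_t, i.e. in units of 2^CODEL_SHIFT ns. WRITE_ONCE()
	 * pairs with the lockless READ_ONCE() readers in codel_dump().
	 */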
	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

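/* Initialize the qdisc: install the defaults from codel_params_init()
 * and a limit of DEFAULT_CODEL_LIMIT packets, then apply any options
 * supplied at creation time.
 */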
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

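/* Report the current configuration back to userspace. Parameters are
 * read with READ_ONCE() since codel_change() may update them without
 * this path holding the tree lock; ce_threshold is only dumped when it
 * has been enabled.
 */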
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

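/* Export extended statistics. drop_next is reported as a signed offset
 * in usec relative to now, and only while the algorithm is in its
 * dropping state.
 */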
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

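/* Purge the queue and reset the algorithm's run-time state; the
 * configured parameters are left untouched.
 */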
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");