/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by an internal or external classifier) on flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
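
/* Example configuration (assuming the usual iproute2 fq_codel syntax; see
 * tc-fq_codel(8), this file does not define it):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		quantum 1514 target 5ms interval 100ms ecn
 *
 * These options map to the TCA_FQ_CODEL_* netlink attributes handled in
 * fq_codel_change() below; target and interval travel as microseconds.
 */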

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

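/* Hash the dissected flow keys (addresses, ports, protocol) under a
 * per-qdisc random perturbation, then map the result onto [0, flows_cnt)
 * with reciprocal_scale(), a multiply-and-shift that avoids a modulo.
 */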
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  const struct sk_buff *skb)
{
	struct flow_keys keys;
	unsigned int hash;

	skb_flow_dissect(skb, &keys);
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}

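/* Classify a packet to a flow id in [1, flows_cnt]; 0 means "drop".
 * skb->priority can select a flow directly; otherwise the external
 * filter chain (if any) or the internal hash decides.
 */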
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

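/* Drop one packet from the flow with the largest backlog. Used when the
 * qdisc is overlimit, and also serves as the ->drop() callback; returns
 * the index of the flow we dropped from.
 */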
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	qdisc_qstats_backlog_dec(sch, skb);
	/* free the skb only after the stats above are done looking at it */
	kfree_skb(skb);
	flow->dropped++;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

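	/* An idle flow (on neither RR list) re-enters service as a new
	 * flow: tail of new_flows, with a full quantum of DRR credit.
	 */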
	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}

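/* Serve new_flows before old_flows. codel_dequeue() runs the per-flow
 * CoDel state machine and may drop or ECN-mark packets before handing
 * one back to us.
 */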
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

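	/* Deficit Round Robin: a flow that has used up its credit gets
	 * another quantum and moves to the tail of old_flows before we
	 * pick again.
	 */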
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

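	/* target and interval arrive in microseconds over netlink;
	 * convert to codel's internal fixed-point time units
	 * (nanoseconds >> CODEL_SHIFT).
	 */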
	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

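/* The flow table can be large (up to 65536 entries), so fall back to
 * vmalloc() when a physically contiguous kzalloc() fails.
 */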
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

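	/* Allocate the flow table only on first init; fq_codel_change()
	 * rejects resizing once q->flows exists.
	 */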
	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

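/* fq_codel has no configurable leaf qdiscs; the class ops below exist
 * mainly to expose per-flow statistics (one pseudo-class per flow) and
 * to let tc filters be attached.
 */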
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");