// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows.
 * This is a stochastic model (since we use a hash, several flows
 * might be hashed onto the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
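
/* Illustrative example (not part of the original comment): with the default
 * quantum of one MTU (e.g. 1514 bytes), a flow at the head of new_flows may
 * dequeue roughly one full-sized packet before its deficit goes non-positive,
 * at which point it is recredited one quantum and rotated to the tail of
 * old_flows. Bulk flows therefore share bandwidth round-robin, while sparse
 * flows that drain completely tend to re-enter new_flows on their next packet
 * and see low latency.
 */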

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

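/* Map the skb's flow hash onto [0, flows_cnt): reciprocal_scale() scales a
 * 32-bit hash into the bucket range with a multiply and shift instead of a
 * modulus.
 */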
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

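/* Classify the packet into a flow. Returns flow index + 1, or 0 when the
 * packet must be dropped. skb->priority can select a flow directly;
 * otherwise an attached tc filter, or the flow hash as a fallback, picks
 * the bucket.
 */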
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb_mark_not_on_list(skb);
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet queue/enqueue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        /* Tell codel to increase its signal strength also */
        flow->cvars.count += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}
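
/* Illustrative arithmetic (not from the original source): if the fattest
 * flow holds 64KB, the threshold is 32KB; with 1500-byte packets the loop
 * above drops about 22 packets and stops, well under the default 64-packet
 * batch limit.
 */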

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
         * with a 64 packets limit to not add a too big cpu spike here.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

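/* DRR scheduler: serve new_flows first, then old_flows. A flow that has
 * exhausted its deficit is recredited one quantum and rotated to the tail
 * of old_flows; an empty flow is either demoted to old_flows (to prevent
 * starvation) or unlinked from the round-robin lists entirely.
 */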
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]          = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]           = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL]        = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]             = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]           = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]         = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT]    = { .type = NLA_U32 },
};

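/* Netlink change handler. Times arrive in usec and are converted to
 * codel_time_t, i.e. nanoseconds shifted down by CODEL_SHIFT:
 * (usec * NSEC_PER_USEC) >> CODEL_SHIFT. The flow count can only be set
 * before the flow table has been allocated.
 */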
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
                                          fq_codel_policy, NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

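/* Defaults: 10240 packet limit, 1024 flows, 32 MB memory limit, 64 packet
 * drop batch, quantum of one MTU. User-supplied attributes are applied via
 * fq_codel_change() before the flow table is allocated.
 */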
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
                        goto init_failure;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                goto init_failure;

        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
                if (!q->flows) {
                        err = -ENOMEM;
                        goto init_failure;
                }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                if (!q->backlogs) {
                        err = -ENOMEM;
                        goto alloc_failure;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;

alloc_failure:
        kvfree(q->flows);
        q->flows = NULL;
init_failure:
        q->flows_cnt = 0;
        return err;
}

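/* Mirror of fq_codel_change(): report the current parameters, converting
 * codel times back to usec for userspace.
 */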
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

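/* fq_codel is classless: the class ops below are stubs that expose each
 * flow as a read-only pseudo-class (classid = flow index + 1), so that
 * "tc -s class show" works and tc filters can be attached to the qdisc.
 */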
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
                                            struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = 0;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

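/* Walk the pseudo-classes, visiting only flows currently linked on the
 * new/old round-robin lists (i.e. flows that are active).
 */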
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           = fq_codel_leaf,
        .find           = fq_codel_find,
        .tcf_block      = fq_codel_tcf_block,
        .bind_tcf       = fq_codel_bind,
        .unbind_tcf     = fq_codel_unbind,
        .dump           = fq_codel_dump_class,
        .dump_stats     = fq_codel_dump_class_stats,
        .walk           = fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         = &fq_codel_class_ops,
        .id             = "fq_codel",
        .priv_size      = sizeof(struct fq_codel_sched_data),
        .enqueue        = fq_codel_enqueue,
        .dequeue        = fq_codel_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = fq_codel_init,
        .reset          = fq_codel_reset,
        .destroy        = fq_codel_destroy,
        .change         = fq_codel_change,
        .dump           = fq_codel_dump,
        .dump_stats     = fq_codel_dump_stats,
        .owner          = THIS_MODULE,
};

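/* Module registration. Illustrative userspace usage (device name and values
 * are examples, not from this file):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *      target 5ms interval 100ms quantum 1514 ecn
 */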
static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");