/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level).
 * This implementation uses L = 8 and N = 16, which permits us to split one
 * 32-bit hash (provided per packet by rxhash or an external classifier)
 * into 8 subhashes of 4 bits each.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
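
/*
 * Example: a packet whose 32-bit hash is 0x12345678 maps to bin 0x8 at
 * level 0, bin 0x7 at level 1, ..., bin 0x1 at level 7: the loops below
 * consume SFB_BUCKET_SHIFT bits at a time, low nibble first.
 */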

/* SFB uses a virtual queue, named "bin", per (level, bucket) pair */
struct sfb_bucket {
	u16		qlen; /* length of the virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of the SFB reference: moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed into one or two bins.
 * We store the two hash values in skb_cb.
 * (A zero value means double buffering was not used.)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from the skb
 * rxhash. If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values, with 0xFFFF
 * (SFB_MAX_PROB) representing 65535/65536 (almost 1.0).
 * Addition and subtraction saturate in [0, 65535].
 */
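/* For instance, 0x8000 is 0.5: prob_plus(0xC000, 0x8000) saturates to
 * SFB_MAX_PROB (0xFFFF), and prob_minus(0x4000, 0x6000) saturates to 0.
 */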
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

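/* One slot's bins form one contiguous [SFB_LEVELS][SFB_NUMBUCKETS] array,
 * so stepping the bucket pointer by SFB_NUMBUCKETS advances to the same
 * position in the next level.
 */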
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * Compute max qlen, max p_mark, and average p_mark over the active slot.
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

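/* Activate the standby slot, warmed up while double buffering was on,
 * and re-key the old active slot so it becomes a fresh standby for the
 * next rehash period.
 */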
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with a "penalty_burst" burst.
 */
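/* A small token bucket: e.g. with the defaults below (penalty_rate = 10,
 * penalty_burst = 20), one idle second refills 10 tokens, capped at 20;
 * idle time itself is capped at 10 seconds. Returns true when the packet
 * exceeds the budget and must be dropped.
 */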
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

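/* Returns true and stores the minor class id in *salt when the filters
 * match; returns false when the packet must not be enqueued (shot, or
 * stolen/queued/trapped by actions, in which case *qerr is updated).
 */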
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get the result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

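	/* Walk the SFB_LEVELS levels of the active slot; each level picks
	 * one bucket via the next 4-bit subhash. Per Blue, a bucket's
	 * marking probability decays while its virtual queue is empty and
	 * grows once it exceeds bin_size. The flow is judged by the
	 * minimum qlen and p_mark over its buckets, which limits the
	 * impact of hash collisions.
	 */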
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

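	/* r is uniform in [0, SFB_MAX_PROB], so the packet is marked with
	 * probability roughly p_min / 65536; once p_min exceeds 1/2, a
	 * growing fraction of would-be marks becomes early drops instead.
	 */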
	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

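/* Defaults: rehash every 600 s with a 60 s double-buffering warmup;
 * increment is ~0.1 % of SFB_MAX_PROB and decrement one sixth of that;
 * inelastic flows get a 10 packet/s budget with a burst of 20.
 */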
static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000, /* one sixth of increment */
	.penalty_rate = 10,
	.penalty_burst = 20,
};

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
				  q->qdisc->qstats.backlog);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

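/* SFB exposes a single pseudo-class (handle 1) wrapping the child qdisc;
 * classes cannot be created, changed, or deleted, so most class ops
 * below are stubs kept for tc introspection and filter attachment.
 */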
static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");