// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF	/* Q0.16 representation of (almost) 1.0 */
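
/* Example: hash 0xA3F1C2D4 yields the per-level bucket indexes
 * 0x4, 0xD, 0x2, 0xC, 0x1, 0xF, 0x3, 0xA (low nibble first),
 * i.e. one bucket in each of the 8 levels.
 */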

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb may be hashed into one or both bin sets (slots).
 * We store the two hash values in skb_cb.
 * (A zero value means double buffering was not used.)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
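
/* Example of the saturating arithmetic:
 * prob_plus(0xFF00, 0x0200) saturates to SFB_MAX_PROB (0xFFFF);
 * prob_minus(0x0100, 0x0200) floors at 0.
 */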

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = cb->hashes[0];
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = cb->hashes[1];
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

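/* Promote the standby bin set: re-key the old active slot so it starts
 * fresh next warmup, then make the other slot active.
 */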
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Inelastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
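/* Returns true when the packet must be dropped : the token bucket refills
 * at penalty_rate tokens per second (capped at penalty_burst, from at most
 * 10 seconds of elapsed time), and each packet let through costs one token.
 */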
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	struct sfb_skb_cb cb;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

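	/* Periodically move to new hash functions (Section 4.4 of the SFB
	 * reference) : within warmup_time of the next rehash, enable double
	 * buffering so the standby bin set warms up before becoming active.
	 */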
	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

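	/* Blue per-bin update : a bin with an empty virtual queue decays
	 * p_mark by "decrement" (d2), a bin at or above bin_size raises it
	 * by "increment" (d1). Track the smallest qlen and p_mark seen
	 * across the levels.
	 */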
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

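	/* If even the least loaded bucket this packet hashes to has reached
	 * q->max, every level agrees this flow (or a colliding set of flows)
	 * is over its share : drop.
	 */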
	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

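	/* Random early mark/drop : r is uniform over [0, SFB_MAX_PROB],
	 * so the packet is handled with probability ~p_min.
	 */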
	r = get_random_u16() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		increment_qlen(&cb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (likely(q->qdisc))
		qdisc_reset(q->qdisc);
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
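
/* With the defaults above : rehash every 600 s with a 60 s warmup,
 * increment = 66/65536 (~0.1 %) and decrement = 11/65536 (~0.017 %).
 */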

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

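	/* SFB keeps no queue of its own : packets are stored in a default
	 * pfifo child sized to "limit", swapped in under the tree lock below.
	 */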
	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("sfb");

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");