// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] arrays of bins: L levels with N bins per level.
 * This implementation uses L = 8 and N = 16, which lets us split one
 * 32-bit hash (provided per packet by rxhash or an external classifier)
 * into 8 subhashes of 4 bits each.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
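
/*
 * Illustrative example (annotation, not part of the original sources):
 * with SFB_BUCKET_SHIFT = 4, the hash 0xA3C1F052 selects bucket 0x2 at
 * level 0, 0x5 at level 1, 0x0 at level 2, 0xF at level 3, 0x1 at
 * level 4, 0xC at level 5, 0x3 at level 6 and 0xA at level 7: each
 * 4-bit nibble, consumed low to high, indexes one bin per level.
 */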

/* The SFB algorithm uses virtual queues, named "bins" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of the SFB reference: moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed into one or two sets of bins
 * (one per slot); we store both hash values in the skb cb.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from the skb
 * rxhash. If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0).
 * Addition and subtraction saturate within [0, 65535].
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
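
/* Worked example (annotation, not original code): in Q0.16 a p_mark of
 * 0x8000 is probability 0.5 and SFB_MAX_PROB (0xFFFF) is 65535/65536.
 * prob_plus(0xFFF0, 0x0100) saturates to 0xFFFF, while
 * prob_minus(0x0010, 0x0100) clamps to 0.
 */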

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = cb->hashes[0];
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = cb->hashes[1];
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
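
/* Timing sketch (annotation, not original code): with the default
 * rehash_interval of 10 minutes and warmup_time of 60 seconds,
 * sfb_enqueue() turns on double_buffering at t = 9 min so the standby
 * slot's bins get trained, then sfb_swap_slot() makes that slot active
 * at t = 10 min and re-keys the retired one for the next cycle.
 */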

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with a "penalty_burst" packet burst.
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
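
/* Worked example (annotation, not original code): with the defaults
 * penalty_rate = 10 and penalty_burst = 20, a flow whose marking
 * probability saturated may send a burst of up to 20 packets after an
 * idle period, then is held to roughly 10 packets per second; idle
 * credit stops accruing after 10 * HZ jiffies (10 seconds).
 */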
253*4882a593Smuzhiyun 
sfb_classify(struct sk_buff * skb,struct tcf_proto * fl,int * qerr,u32 * salt)254*4882a593Smuzhiyun static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
255*4882a593Smuzhiyun 			 int *qerr, u32 *salt)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun 	struct tcf_result res;
258*4882a593Smuzhiyun 	int result;
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	result = tcf_classify(skb, fl, &res, false);
261*4882a593Smuzhiyun 	if (result >= 0) {
262*4882a593Smuzhiyun #ifdef CONFIG_NET_CLS_ACT
263*4882a593Smuzhiyun 		switch (result) {
264*4882a593Smuzhiyun 		case TC_ACT_STOLEN:
265*4882a593Smuzhiyun 		case TC_ACT_QUEUED:
266*4882a593Smuzhiyun 		case TC_ACT_TRAP:
267*4882a593Smuzhiyun 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
268*4882a593Smuzhiyun 			fallthrough;
269*4882a593Smuzhiyun 		case TC_ACT_SHOT:
270*4882a593Smuzhiyun 			return false;
271*4882a593Smuzhiyun 		}
272*4882a593Smuzhiyun #endif
273*4882a593Smuzhiyun 		*salt = TC_H_MIN(res.classid);
274*4882a593Smuzhiyun 		return true;
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 	return false;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
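/* Enqueue decision path (summary annotation, not original code):
 * 1. drop if the hard limit sch->q.qlen >= q->limit is hit;
 * 2. service the rehash / double-buffering timers;
 * 3. hash the packet (external classid or rxhash) and walk one bucket
 *    per level, updating each bucket's p_mark from its virtual qlen;
 * 4. drop if every bucket on the path is full (minqlen >= q->max);
 * 5. if p_min saturated, treat the flow as inelastic: train the
 *    standby slot when double buffering, then rate-limit it;
 * 6. otherwise ECN-mark (or early-drop) with probability p_min,
 *    escalating to drops once p_min exceeds SFB_MAX_PROB / 2;
 * 7. hand the packet to the child qdisc and account the bin qlens.
 */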
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	struct sfb_skb_cb cb;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		increment_qlen(&cb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (likely(q->qdisc))
		qdisc_reset(q->qdisc);
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
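
/* Annotation (not original code): in Q0.16 these defaults work out to
 * increment = 66 (~0.1 % of SFB_MAX_PROB) and decrement = 11
 * (~0.017 %), so a bucket's p_mark rises roughly six times faster on
 * overflow than it decays while the bucket stays empty.
 */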

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.find		=	sfb_find,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_block	=	sfb_tcf_block,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_unbind,
	.dump		=	sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
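
/* Usage sketch (annotation, not original code): attaching SFB with the
 * defaults from sfb_default_ops above, via iproute2:
 *
 *	tc qdisc add dev eth0 root handle 1: sfb
 *
 * Parameter names such as "rehash", "db", "limit", "max", "target",
 * "increment", "decrement", "penalty_rate" and "penalty_burst" follow
 * tc-sfb(8); consult that man page for the exact syntax, which may
 * vary by iproute2 version.
 */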