/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on linux by :
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
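
/* Most of CoDel lives in headers: net/codel.h holds the shared types and
 * time helpers, net/codel_impl.h the generic control-law machinery (also
 * used by fq_codel), and net/codel_qdisc.h the qdisc glue such as the
 * per-skb enqueue timestamp helpers.
 */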

#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit; /* packets dropped because sch->limit was hit */
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: the backlog is handled by
 * codel, so we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

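/* Companion callback: codel_dequeue() invokes this for every packet it
 * decides to drop, so the qdisc frees the skb and accounts the drop.
 */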
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for the next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

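/* Enqueue is a plain tail enqueue; the only CoDel-specific step is
 * timestamping the packet so that dequeue can compute its sojourn time.
 * Packets beyond the hard limit are dropped and counted separately from
 * CoDel's own delay-driven drops, via drop_overlimit.
 */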
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

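/* Note that qdisc_drop() above does not free the packet immediately: it
 * chains it onto *to_free so the caller can batch-free dropped packets
 * outside the qdisc lock.
 */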
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};

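/* All attributes are u32s carrying a packet count (LIMIT), a boolean (ECN),
 * or a time in usec (TARGET, INTERVAL, CE_THRESHOLD). For illustration
 * only, a typical iproute2 command that sets them (the device name is just
 * an example):
 *
 *	tc qdisc replace dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 */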
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

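	/* User space passes times in usec; codel_time_t stores them as
	 * nanoseconds right-shifted by CODEL_SHIFT, hence the conversions
	 * below.
	 */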
	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

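	/* ce_threshold is an independent, gentler signal: packets whose
	 * sojourn time exceeds it are ECN CE-marked without affecting
	 * CoDel's drop state.
	 */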
	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	qlen = sch->q.qlen;
	/* If the limit was lowered, trim the queue down to the new limit */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

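	/* With a nonzero limit, let the stack bypass an empty qdisc and
	 * transmit directly; a limit of 0 means every packet must take the
	 * enqueue path (and be dropped), so bypass must stay off.
	 */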
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(q->params.target)) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(q->params.interval)) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			q->params.ecn))
		goto nla_put_failure;
	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->params.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit	= q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

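	/* Report the time until the next scheduled drop; the value goes
	 * negative when that drop is already overdue.
	 */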
	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),

	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};

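/* No .destroy hook is needed here: codel_sched_data holds no dynamically
 * allocated state beyond what the qdisc core manages itself.
 */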
static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");