// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support was added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */
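
/* Example configuration from user space (values are illustrative only;
 * see the tc-pie(8) man page for the full option syntax):
 *
 *	tc qdisc add dev eth0 root pie limit 1000 target 15ms tupdate 15ms ecn
 */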

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 backlog, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
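	/* e.g. with a 1500 byte MTU, a 150 byte packet is dropped with
	 * roughly one tenth of the nominal probability.
	 */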
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0)
		vars->accu_prob = 0;
	else
		vars->accu_prob += local_prob;

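	/* Derandomization, as described in RFC 8033: suppress drops while
	 * the accumulated probability is below 0.85 and force a drop once
	 * it reaches 8.5, which keeps the spacing between drops close to
	 * the average implied by the drop probability.
	 */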
	if (vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
		return true;

	prandom_bytes(&rnd, 8);
	if ((rnd >> BITS_PER_BYTE) < local_prob) {
		vars->accu_prob = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		q->params.dq_rate_estimator =
			nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 backlog)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (backlog == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If the current queue holds about 10 packets or more and dq_count
	 * is unset, we have enough packets to calculate the drain rate. Save
	 * the current time as dq_tstamp and start a measurement cycle.
	 */
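	/* QUEUE_THRESHOLD is defined in include/net/pie.h; it is on the
	 * order of ten full-size Ethernet frames.
	 */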
	if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value, i.e., <= QUEUE_THRESHOLD bytes, reset
	 * dq_count to DQCOUNT_INVALID as we don't have enough packets to
	 * calculate the drain rate anymore. The following if block is entered
	 * only when we have a substantial queue built up (QUEUE_THRESHOLD
	 * bytes or more) and we calculate the drain rate for the threshold
	 * here. dq_count is in bytes, time difference in psched_time, hence
	 * rate is in bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

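			/* Drain rate over this measurement interval, kept
			 * left-shifted by PIE_SCALE bits for precision.
			 */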
			count = count / dtime;

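			/* The first sample primes the average; after that,
			 * an EWMA with gain 1/8:
			 * avg = 7/8 * avg + 1/8 * count.
			 */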
			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
				    (vars->avg_dq_rate -
				     (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (backlog < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 backlog)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

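	/* With the rate estimator enabled, queuing delay follows Little's
	 * law: qdelay = backlog / avg_dq_rate (in scaled fixed point).
	 * Otherwise the timestamp-based qdelay from the dequeue path is used.
	 */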
	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

		if (vars->avg_dq_rate > 0)
			qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and backlog is not, it means backlog is very small,
	 * so we do not update probability in this round.
	 */
	if (qdelay == 0 && backlog != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with a typical
	 * value for alpha of 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to the 0-2 range.
	 */
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
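
	/* e.g. a user-space alpha of 2 corresponds to 2/16 = 0.125 here,
	 * expressed in units of MAX_PROB per second of queuing-delay error.
	 */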

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (qdelay - params->target);
	delta += beta * (qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250 ms).
	 * 250 ms is derived through experiments and provides error protection.
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->backlog_old = backlog;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. If the dq_rate estimator is enabled, we have at least one
	 *    estimate for the avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			q->params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
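	/* Internal prob is scaled to MAX_PROB (U64_MAX >> BITS_PER_BYTE);
	 * shifting left by BITS_PER_BYTE rescales it toward the full u64
	 * fixed-point range before reporting it to user space.
	 */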
	struct tc_pie_xstats st = {
		.prob = q->vars.prob << BITS_PER_BYTE,
		.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
			   NSEC_PER_USEC,
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.maxq = q->stats.maxq,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");