// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional
   step during enqueue: if the average queue size is over the low threshold
   (qmin), a packet is chosen at random from the queue. If both the new and
   the chosen packet belong to the same flow, both are dropped. Unlike RED,
   CHOKe is not really a "classful" qdisc, because it needs to access
   packets in the queue randomly. It has a minimal class interface to allow
   overriding the builtin flow classifier with filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe: A Stateless Active Queue
   Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004.
 */

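/*
 * Illustrative setup from userspace, as a sketch only: the option names
 * follow tc-choke(8), and the values are examples, not recommendations.
 *
 *	tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit \
 *		min 100 max 300 ecn
 */
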
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)
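/*
 * The backing table is sized to roundup_pow_of_two(limit + 1) entries (see
 * choke_change() below), so this bound keeps it at no more than 2^17
 * sk_buff pointers, i.e. 1 MiB of pointer storage on a 64-bit kernel.
 */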

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
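
/*
 * Example of the mask arithmetic above: with tab_mask = 7 (an 8-slot
 * table), head = 6 and tail = 2 give (2 - 6) & 7 = 4 slots in use
 * (indices 6, 7, 0 and 1).
 */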

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
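
/*
 * Dropping a packet from the middle of the table (choke_drop_by_idx()
 * below) leaves a NULL "hole"; the two helpers above shrink the
 * [head, tail) window when such a hole sits at either end of it.
 */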

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u8			 keys_valid;
	struct flow_keys_digest	 keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Compare the flows of two packets.
 * Returns true only if source and destination address and port match;
 * returns false for special cases.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
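
/*
 * The digest is cached in the skb control block, so a queued packet that is
 * drawn as the comparison victim on several enqueues is dissected only once.
 */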

/*
 * Select a packet at random from the queue.
 * HACK: since the queue can have holes from previous deletions, retry a few
 * times to find a random skb, then give up and return the head.
 * Returns NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}
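
/*
 * The fallback slot is never a hole: whenever the queue is non-empty,
 * choke_zap_head_holes() has already advanced q->head past any NULL
 * entries, so q->tab[q->head] is a real packet.
 */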

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
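
/*
 * Note that flow-match and RED drops above return NET_XMIT_CN rather than
 * NET_XMIT_DROP, signalling congestion to the caller instead of an
 * ordinary queue-limit drop.
 */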

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	stab = nla_data(tb[TCA_CHOKE_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

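	/*
	 * Size the table to the next power of two above the limit so that
	 * index arithmetic can use a mask: e.g. limit = 1000 gives
	 * roundup_pow_of_two(1001) = 1024 and a mask of 1023.
	 */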
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	 = q->stats.prob_drop + q->stats.forced_drop,
		.marked	 = q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	 = q->stats.pdrop,
		.other	 = q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");