// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit	- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
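
/* Illustrative only: a typical user-space configuration (hypothetical
 * values) via iproute2 might look like
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptive bandwidth 10Mbit
 *
 * where min/max become qth_min/qth_max and limit is the hard queue
 * limit enforced below.
 */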

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

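/* Enqueue path: update the average queue size (qavg) from the child's
 * backlog, then act on the RED verdict: pass the packet through, ECN-mark
 * it (notifying the "mark" qevent block), or drop it early (notifying the
 * "early_drop" qevent block).
 */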
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

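/* Dequeue from the child qdisc; when the queue drains empty, start the
 * idle period so that qavg decays correctly while the link is idle.
 */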
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

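/* Push the current RED configuration to (or remove it from) the device
 * via ndo_setup_tc(TC_SETUP_QDISC_RED), for drivers that offload RED.
 */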
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB] = { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

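/* Apply a parsed TCA_RED_* configuration: (re)create the bfifo child to
 * match the new limit, update flags and RED parameters under the tree
 * lock, and re-arm the adaptative timer if requested.
 */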
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

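/* Adaptive RED (TC_RED_ADAPTATIVE): periodically re-tune max_P under the
 * root lock, re-arming the timer every HZ/2 jiffies (~500ms).
 */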
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

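/* Initialize: parse the TCA_RED_* attributes, apply the initial
 * configuration, then bind the optional early_drop and mark qevent blocks.
 */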
static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

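/* Runtime change: qevent block bindings cannot be changed after init, so
 * reject any attempt to do so before applying the rest of the
 * configuration.
 */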
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

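/* Dump xstats; when offloaded, first pull the hardware counters into
 * q->stats so the totals below include them.
 */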
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

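/* RED is classful only in the sense that it exposes a single pseudo-class
 * (minor 1) wrapping the child qdisc.
 */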
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};


static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");