// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c  Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/errno.h>
12*4882a593Smuzhiyun #include <linux/netdevice.h>
13*4882a593Smuzhiyun #include <linux/pkt_sched.h>
14*4882a593Smuzhiyun #include <net/sch_generic.h>
15*4882a593Smuzhiyun #include <net/pkt_sched.h>
16*4882a593Smuzhiyun #include <net/pkt_cls.h>
17*4882a593Smuzhiyun
/*
 * drr_class - per-class scheduler state
 *
 * Each traffic class owns a child qdisc that buffers its packets and
 * carries the DRR bookkeeping (quantum and deficit, both in bytes).
 */
struct drr_class {
	struct Qdisc_class_common	common;		/* classid + class-hash linkage */
	unsigned int			filter_cnt;	/* tc filters bound to this class */

	struct gnet_stats_basic_packed	bstats;		/* bytes/packets transmitted */
	struct gnet_stats_queue		qstats;		/* drop counters */
	struct net_rate_estimator __rcu	*rate_est;	/* optional TCA_RATE estimator */
	struct list_head		alist;		/* link in drr_sched::active */
	struct Qdisc			*qdisc;		/* child qdisc holding the packets */

	u32				quantum;	/* bytes credited per DRR round */
	u32				deficit;	/* bytes still spendable this round */
};
31*4882a593Smuzhiyun
/*
 * drr_sched - root qdisc private data
 */
struct drr_sched {
	struct list_head		active;		/* round-robin list of backlogged classes */
	struct tcf_proto __rcu		*filter_list;	/* classification filter chain */
	struct tcf_block		*block;		/* tc filter block */
	struct Qdisc_class_hash		clhash;		/* classid -> drr_class lookup hash */
};
38*4882a593Smuzhiyun
/* Look up the class for @classid; NULL if no such class exists. */
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc = qdisc_class_find(&q->clhash, classid);

	return clc ? container_of(clc, struct drr_class, common) : NULL;
}
49*4882a593Smuzhiyun
/* Netlink attribute policy for the TCA_OPTIONS of a drr class. */
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
53*4882a593Smuzhiyun
/*
 * drr_change_class - create a new class or modify an existing one
 * @sch:      the drr root qdisc
 * @classid:  class identifier chosen by the user
 * @parentid: unused; drr has a flat, single-level class hierarchy
 * @tca:      netlink attributes; TCA_OPTIONS carries the drr attributes,
 *            TCA_RATE an optional rate-estimator configuration
 * @arg:      in: existing class pointer or 0; out: the (possibly new) class
 *
 * If TCA_DRR_QUANTUM is absent the device MTU is used as the quantum.
 * Returns 0 on success or a negative errno.
 */
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0) {
			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
			return -EINVAL;
		}
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		/* Existing class: optionally replace the rate estimator,
		 * then update the quantum under the qdisc tree lock.
		 */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
				return err;
			}
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	/* New class. */
	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->quantum = quantum;
	/* Initialize the active-list linkage so list operations on a class
	 * that has never been queued are harmless (kzalloc leaves the list
	 * pointers NULL, which would crash a stray list_del()).
	 */
	INIT_LIST_HEAD(&cl->alist);
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid,
				      NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;	/* fall back: class blackholes */
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
			qdisc_put(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	/* Publish the class; hash insertion must happen under the tree lock. */
	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
141*4882a593Smuzhiyun
/* Free a class: kill its rate estimator, drop the child qdisc reference
 * and release the memory. Callers in this file remove the class from the
 * class hash first so it is no longer reachable.
 */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}
148*4882a593Smuzhiyun
/* Delete a class on user request. Fails with -EBUSY while tc filters
 * still reference the class.
 */
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	/* Drop all queued packets (this notifies parents and unlinks the
	 * class from the active list via the qlen_notify path) and make
	 * the class unreachable through the hash.
	 */
	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	/* Free only after dropping the tree lock. */
	drr_destroy_class(sch, cl);
	return 0;
}
167*4882a593Smuzhiyun
/* .find hook: resolve a classid to an opaque class handle (0 if absent). */
static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	return (unsigned long)cl;
}
172*4882a593Smuzhiyun
drr_tcf_block(struct Qdisc * sch,unsigned long cl,struct netlink_ext_ack * extack)173*4882a593Smuzhiyun static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
174*4882a593Smuzhiyun struct netlink_ext_ack *extack)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun struct drr_sched *q = qdisc_priv(sch);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun if (cl) {
179*4882a593Smuzhiyun NL_SET_ERR_MSG(extack, "DRR classid must be zero");
180*4882a593Smuzhiyun return NULL;
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun return q->block;
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun
/* A tc filter is being bound to @classid: take a filter reference so the
 * class cannot be deleted while filters still point at it.
 */
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl;

	cl = drr_find_class(sch, classid);
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}
196*4882a593Smuzhiyun
drr_unbind_tcf(struct Qdisc * sch,unsigned long arg)197*4882a593Smuzhiyun static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun struct drr_class *cl = (struct drr_class *)arg;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun cl->filter_cnt--;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun
/* Replace the class's child qdisc with @new (creating a default pfifo if
 * @new is NULL) and hand the previous child back in *old.
 */
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;	/* allocation failed: blackhole */
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}
220*4882a593Smuzhiyun
drr_class_leaf(struct Qdisc * sch,unsigned long arg)221*4882a593Smuzhiyun static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun struct drr_class *cl = (struct drr_class *)arg;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun return cl->qdisc;
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
drr_qlen_notify(struct Qdisc * csh,unsigned long arg)228*4882a593Smuzhiyun static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun struct drr_class *cl = (struct drr_class *)arg;
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun list_del(&cl->alist);
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
/* Fill a netlink dump entry for one class: handle, child qdisc handle and
 * the configured quantum inside a TCA_OPTIONS nest.
 */
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	/* Undo the partially built nest; the caller sees -EMSGSIZE. */
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
256*4882a593Smuzhiyun
/* Dump per-class statistics: basic byte/packet counters, rate estimate,
 * queue stats of the child qdisc, plus drr-specific xstats (the current
 * deficit, which is only meaningful while packets are queued).
 */
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
	struct Qdisc *cl_q = cl->qdisc;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;	/* stale when the class is idle */

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
277*4882a593Smuzhiyun
drr_walk(struct Qdisc * sch,struct qdisc_walker * arg)278*4882a593Smuzhiyun static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun struct drr_sched *q = qdisc_priv(sch);
281*4882a593Smuzhiyun struct drr_class *cl;
282*4882a593Smuzhiyun unsigned int i;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun if (arg->stop)
285*4882a593Smuzhiyun return;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun for (i = 0; i < q->clhash.hashsize; i++) {
288*4882a593Smuzhiyun hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
289*4882a593Smuzhiyun if (arg->count < arg->skip) {
290*4882a593Smuzhiyun arg->count++;
291*4882a593Smuzhiyun continue;
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
294*4882a593Smuzhiyun arg->stop = 1;
295*4882a593Smuzhiyun return;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun arg->count++;
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
/* Map an skb to a drr class.
 *
 * Fast path: if skb->priority carries a classid under this qdisc's major
 * number, look it up directly. Otherwise run the attached tc filters.
 * QUEUED/STOLEN/TRAP verdicts mark the packet as stolen via *qerr and,
 * like SHOT or "no match", return NULL so the caller drops it.
 */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	/* Default: count an unmatched packet as a bypass drop. */
	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		/* Filters may return the class pointer directly or only a
		 * classid that still needs resolving.
		 */
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
339*4882a593Smuzhiyun
/* Enqueue a packet into its class's child qdisc. If the class just became
 * backlogged it is appended to the active list with a fresh deficit of
 * one quantum (classic DRR activation).
 */
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);	/* cache: skb may be gone below */
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;
	bool first;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		/* Child rejected the packet; only count genuine drops. */
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
376*4882a593Smuzhiyun
/* Deficit Round Robin dequeue: serve the head of the active list while
 * its deficit covers the head packet; otherwise credit another quantum
 * and rotate the class to the tail. A class that runs empty is removed
 * from the active list.
 */
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			/* Active class with nothing to peek: the child is
			 * not work-conserving. Warn and give up this round.
			 */
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			/* Charge stats before returning the packet. */
			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* Deficit exhausted: top up and move to the end of the round. */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
416*4882a593Smuzhiyun
/* Initialize the root qdisc: tc filter block, class hash and an empty
 * active list.
 * NOTE(review): on a hash-init failure q->block is not put here — this
 * appears to rely on the core invoking ->destroy after a failed ->init;
 * confirm for this kernel tree.
 */
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}
432*4882a593Smuzhiyun
/* Reset: deactivate and reset every class's child qdisc. A class sits on
 * the active list iff its child holds packets, hence the qlen check
 * before unlinking.
 * NOTE(review): the parent's qlen/backlog are not cleared here —
 * presumably the core's qdisc_reset() does that after calling this hook;
 * confirm for this kernel tree.
 */
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
}
447*4882a593Smuzhiyun
/* Tear down the qdisc: release the filter block, destroy every class and
 * free the class hash.
 */
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		/* _safe iteration: drr_destroy_class() frees the entry. */
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
464*4882a593Smuzhiyun
/* Class-level operations exposed to the tc core. */
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.find		= drr_search_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
479*4882a593Smuzhiyun
/* Qdisc operations: registers "drr" as a classful scheduler. */
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
492*4882a593Smuzhiyun
/* Module entry: register the drr qdisc with the packet scheduler core. */
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}
497*4882a593Smuzhiyun
/* Module exit: unregister the drr qdisc. */
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}
502*4882a593Smuzhiyun
/* Module registration boilerplate. */
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");