// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_ets.c         Enhanced Transmission Selection scheduler
 *
 * Description
 * -----------
 *
 * The Enhanced Transmission Selection scheduler is a classful queuing
 * discipline that merges the functionality of the PRIO and DRR qdiscs in one
 * scheduler. ETS makes it easy to configure a set of strict and
 * bandwidth-sharing bands to implement the transmission selection described
 * in 802.1Qaz.
 *
 * Although ETS is technically classful, it's not possible to add and remove
 * classes at will. Instead one specifies the number of classes, how many are
 * PRIO-like and how many DRR-like, and the quanta for the latter.
 *
 * Algorithm
 * ---------
 *
 * The strict classes, if any, are tried for traffic first: first band 0, if it
 * has no traffic then band 1, etc.
 *
 * When there is no traffic in any of the strict queues, the bandwidth-sharing
 * ones are tried next. Each band is assigned a deficit counter, initialized to
 * the "quantum" of that band. ETS maintains a list of active bandwidth-sharing
 * bands whose qdiscs are non-empty. A packet is dequeued from the band at the
 * head of the list if the packet size is smaller than or equal to the deficit
 * counter. If the counter is too small, it is increased by "quantum" and the
 * scheduler moves on to the next band in the active list.
 */
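
/* Example configuration (a sketch; the exact iproute2 syntax for the "ets"
 * qdisc is assumed here, and the device name is hypothetical):
 *
 *	tc qdisc add dev eth0 root handle 1: \
 *		ets bands 4 strict 2 quanta 3000 1500 priomap 3 3 2 2
 *
 * This would create four bands: bands 0 and 1 strict, bands 2 and 3
 * bandwidth-sharing with quanta of 3000 and 1500 bytes (roughly a 2:1
 * split). Priorities not listed in the priomap default to the last band,
 * as initialized in ets_qdisc_change() below.
 */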

#include <linux/module.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct ets_class {
	struct list_head alist; /* In struct ets_sched.active. */
	struct Qdisc *qdisc;
	u32 quantum;
	u32 deficit;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
};

struct ets_sched {
	struct list_head active;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;
	unsigned int nstrict;
	u8 prio2band[TC_PRIO_MAX + 1];
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};

static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};

static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};

static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
			     unsigned int *quantum,
			     struct netlink_ext_ack *extack)
{
	*quantum = nla_get_u32(attr);
	if (!*quantum) {
		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
		return -EINVAL;
	}
	return 0;
}

static struct ets_class *
ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
	struct ets_sched *q = qdisc_priv(sch);

	return &q->classes[arg - 1];
}

static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band = cl - q->classes;

	return TC_H_MAKE(sch->handle, band + 1);
}

static void ets_offload_change(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct ets_sched *q = qdisc_priv(sch);
	struct tc_ets_qopt_offload qopt;
	unsigned int w_psum_prev = 0;
	unsigned int q_psum = 0;
	unsigned int q_sum = 0;
	unsigned int quantum;
	unsigned int w_psum;
	unsigned int weight;
	unsigned int i;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.bands = q->nbands;
	qopt.replace_params.qstats = &sch->qstats;
	memcpy(&qopt.replace_params.priomap,
	       q->prio2band, sizeof(q->prio2band));

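	/* The offload interface expects per-band weights in percent rather
	 * than quanta. Convert using running prefix sums so that rounding
	 * error is spread across bands and the weights still sum to 100.
	 * Strict bands have a quantum of zero and thus get a weight of zero.
	 */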
	for (i = 0; i < q->nbands; i++)
		q_sum += q->classes[i].quantum;

	for (i = 0; i < q->nbands; i++) {
		quantum = q->classes[i].quantum;
		q_psum += quantum;
		w_psum = quantum ? q_psum * 100 / q_sum : 0;
		weight = w_psum - w_psum_prev;
		w_psum_prev = w_psum;

		qopt.replace_params.quanta[i] = quantum;
		qopt.replace_params.weights[i] = weight;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, unsigned long arg,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_GRAFT;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.graft_params.band = arg - 1;
	qopt.graft_params.child_handle = new->handle;

	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
				   &qopt, extack);
}

static int ets_offload_dump(struct Qdisc *sch)
{
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
}

static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
{
	unsigned int band = cl - q->classes;

	return band < q->nstrict;
}

static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	ets_offload_change(sch);
	return 0;
}

static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}

static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	return cl->qdisc;
}

static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
{
	unsigned long band = TC_H_MIN(classid);
	struct ets_sched *q = qdisc_priv(sch);

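	/* Band numbers in classids are 1-based, so 0 means "no class". The
	 * unsigned underflow in band - 1 rejects classid 0 together with
	 * out-of-range bands.
	 */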
	if (band - 1 >= q->nbands)
		return 0;
	return band;
}

static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);

	/* We get notified about zero-length child Qdiscs as well if they are
	 * offloaded. Those aren't on the active list though, so don't attempt
	 * to remove them.
	 */
	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
		list_del(&cl->alist);
}

static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = ets_class_id(sch, cl);
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;
	if (!ets_class_is_strict(q, cl)) {
		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct Qdisc *cl_q = cl->qdisc;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct ets_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->nbands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *
ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
		    struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	return ets_class_find(sch, classid);
}

static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
}

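/* Pick the target band for an skb. A classid in skb->priority that matches
 * this qdisc's major handle wins outright; otherwise the attached TC filters
 * are consulted, and if none match (or none are attached), the priomap keyed
 * by skb->priority decides. Out-of-range bands fall back to the band that
 * priority 0 maps to.
 */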
static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}

static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

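	/* A bandwidth-sharing band whose child goes from empty to non-empty
	 * (re)joins the tail of the active list with a full deficit, per the
	 * DRR scheme described in the header comment.
	 */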
	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

static struct sk_buff *
ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
{
	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	return skb;
}

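/* Strict bands are polled in priority order on every pass of the loop, so
 * any strict traffic preempts the bandwidth-sharing bands. Only when all
 * strict bands are empty does the DRR rotation over the active list run.
 */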
static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	struct sk_buff *skb;
	unsigned int band;
	unsigned int len;

	while (1) {
		for (band = 0; band < q->nstrict; band++) {
			cl = &q->classes[band];
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (skb)
				return ets_qdisc_dequeue_skb(sch, skb);
		}

		if (list_empty(&q->active))
			goto out;

		cl = list_first_entry(&q->active, struct ets_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (!skb) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(!skb))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			return ets_qdisc_dequeue_skb(sch, skb);
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
				   unsigned int nbands, u8 *priomap,
				   struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int prio = 0;
	u8 band;
	int rem;
	int err;

	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
				    ets_priomap_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err)
		return err;

	nla_for_each_nested(attr, priomap_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_PRIOMAP_BAND:
			if (prio > TC_PRIO_MAX) {
				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
				return -EINVAL;
			}
			band = nla_get_u8(attr);
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
				return -EINVAL;
			}
			priomap[prio++] = band;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
				  unsigned int nbands, unsigned int nstrict,
				  unsigned int *quanta,
				  struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int band = nstrict;
	int rem;
	int err;

	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
				    ets_quanta_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, quanta_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_QUANTA_BAND:
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
				return -EINVAL;
			}
			err = ets_quantum_parse(sch, attr, &quanta[band++],
						extack);
			if (err)
				return err;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
{
	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int oldbands = q->nbands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int nstrict = 0;
	unsigned int nbands;
	unsigned int i;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_NBANDS]) {
		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
		return -EINVAL;
	}
	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
		return -EINVAL;
	}
	/* Unless overridden, traffic goes to the last band. */
	memset(priomap, nbands - 1, sizeof(priomap));

	if (tb[TCA_ETS_NSTRICT]) {
		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
		if (nstrict > nbands) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
			return -EINVAL;
		}
	}

	if (tb[TCA_ETS_PRIOMAP]) {
		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
					      nbands, priomap, extack);
		if (err)
			return err;
	}

	if (tb[TCA_ETS_QUANTA]) {
		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
					     nbands, nstrict, quanta, extack);
		if (err)
			return err;
	}
	/* If there are more bands than strict + quanta provided, the remaining
	 * ones are ETS with quantum of MTU. Initialize the missing values here.
	 */
	for (i = nstrict; i < nbands; i++) {
		if (!quanta[i])
			quanta[i] = psched_mtu(qdisc_dev(sch));
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < nbands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      ets_class_id(sch, &q->classes[i]),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);

	q->nbands = nbands;
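	/* Fix up bands whose role changes: a band that was strict but is now
	 * bandwidth-sharing gets its alist initialized and, if it is already
	 * backlogged, joins the active list with a full deficit; bands past
	 * the new nbands leave the active list and have their backlog
	 * flushed.
	 */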
	for (i = nstrict; i < q->nstrict; i++) {
		INIT_LIST_HEAD(&q->classes[i].alist);
		if (q->classes[i].qdisc->q.qlen) {
			list_add_tail(&q->classes[i].alist, &q->active);
			q->classes[i].deficit = quanta[i];
		}
	}
	for (i = q->nbands; i < oldbands; i++) {
		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
			list_del(&q->classes[i].alist);
		qdisc_tree_flush_backlog(q->classes[i].qdisc);
	}
	q->nstrict = nstrict;
	memcpy(q->prio2band, priomap, sizeof(priomap));

	for (i = 0; i < q->nbands; i++)
		q->classes[i].quantum = quanta[i];

	for (i = oldbands; i < q->nbands; i++) {
		q->classes[i].qdisc = queues[i];
		if (q->classes[i].qdisc != &noop_qdisc)
			qdisc_hash_add(q->classes[i].qdisc, true);
	}

	sch_tree_unlock(sch);

	ets_offload_change(sch);
	for (i = q->nbands; i < oldbands; i++) {
		qdisc_put(q->classes[i].qdisc);
		memset(&q->classes[i], 0, sizeof(q->classes[i]));
	}
	return 0;
}

static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	INIT_LIST_HEAD(&q->active);
	return ets_qdisc_change(sch, opt, extack);
}

static void ets_qdisc_reset(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	for (band = q->nstrict; band < q->nbands; band++) {
		if (q->classes[band].qdisc->q.qlen)
			list_del(&q->classes[band].alist);
	}
	for (band = 0; band < q->nbands; band++)
		qdisc_reset(q->classes[band].qdisc);
}

static void ets_qdisc_destroy(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	ets_offload_destroy(sch);
	tcf_block_put(q->block);
	for (band = 0; band < q->nbands; band++)
		qdisc_put(q->classes[band].qdisc);
}

static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct nlattr *nest;
	int band;
	int prio;
	int err;

	err = ets_offload_dump(sch);
	if (err)
		return err;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_err;

	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
		goto nla_err;

	if (q->nstrict &&
	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
		goto nla_err;

	if (q->nbands > q->nstrict) {
		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
		if (!nest)
			goto nla_err;

		for (band = q->nstrict; band < q->nbands; band++) {
			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
					q->classes[band].quantum))
				goto nla_err;
		}

		nla_nest_end(skb, nest);
	}

	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
	if (!nest)
		goto nla_err;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
			goto nla_err;
	}

	nla_nest_end(skb, nest);

	return nla_nest_end(skb, opts);

nla_err:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops ets_class_ops = {
	.change		= ets_class_change,
	.graft		= ets_class_graft,
	.leaf		= ets_class_leaf,
	.find		= ets_class_find,
	.qlen_notify	= ets_class_qlen_notify,
	.dump		= ets_class_dump,
	.dump_stats	= ets_class_dump_stats,
	.walk		= ets_qdisc_walk,
	.tcf_block	= ets_qdisc_tcf_block,
	.bind_tcf	= ets_qdisc_bind_tcf,
	.unbind_tcf	= ets_qdisc_unbind_tcf,
};

static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
	.cl_ops		= &ets_class_ops,
	.id		= "ets",
	.priv_size	= sizeof(struct ets_sched),
	.enqueue	= ets_qdisc_enqueue,
	.dequeue	= ets_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.change		= ets_qdisc_change,
	.init		= ets_qdisc_init,
	.reset		= ets_qdisc_reset,
	.destroy	= ets_qdisc_destroy,
	.dump		= ets_qdisc_dump,
	.owner		= THIS_MODULE,
};

static int __init ets_init(void)
{
	return register_qdisc(&ets_qdisc_ops);
}

static void __exit ets_exit(void)
{
	unregister_qdisc(&ets_qdisc_ops);
}

module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");