// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
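
/*
 * Illustrative user-space configuration (a sketch only; exact tc syntax
 * may vary across iproute2 versions):
 *
 *   # create the table: 4 virtual queues (DPs), DP 1 as default, grio on
 *   tc qdisc add dev eth0 root gred setup DPs 4 default 1 grio
 *   # then parameterize one virtual queue
 *   tc qdisc change dev eth0 root gred DP 0 prio 8 limit 60KB min 15KB \
 *      max 45KB burst 20 avpkt 1000 bandwidth 10Mbit probability 0.4
 */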

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO	(MAX_DPs / 2)
#define GRED_VQ_MASK	(MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length */
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};
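
/*
 * Mode summary (as implemented below): GRED_RIO_MODE ("grio") gives each
 * virtual queue its own priority and folds the averages of higher-priority
 * VQs into its RED decision; GRED_WRED_MODE additionally shares a single
 * average queue state (wred_set) across all VQs, and is only enabled when
 * grio is on and at least two VQs share the same priority (see
 * gred_wred_mode_check() and gred_change_table_def()).
 */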

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

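/*
 * Returns 1 when at least two active virtual queues share a priority,
 * i.e. when the table qualifies for WRED mode.
 */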
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be necessary too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

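/*
 * Worked example (assuming MAX_DPs is 16, so GRED_VQ_MASK is 0xf): a
 * classifier that sets skb->tc_index to 0x23 steers the packet to
 * virtual queue 0x23 & 0xf == 3.
 */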
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

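/*
 * In WRED mode all virtual queues share one RED average and idle-start
 * timestamp (table->wred_set); it is loaded into the per-VQ state before
 * each computation and stored back afterwards.
 */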
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless the global
	 * flags are 0.
	 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? Could be controversial but needed for
		 * requeueing.
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
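
/*
 * TC_SETUP_QDISC_GRED reaches drivers through ndo_setup_tc(); the
 * Netronome NFP is one example of a NIC driver implementing this hook,
 * though nothing here depends on a particular driver.
 */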

static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns failure, adjust the stats - in case
	 * offload ended but the driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

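/*
 * Two configuration flavours arrive here over netlink: a table-level
 * change (TCA_GRED_DPS and optionally TCA_GRED_LIMIT, with no PARMS or
 * STAB), which is forwarded to gred_change_table_def(), and a
 * per-virtual-queue change (TCA_GRED_PARMS plus TCA_GRED_STAB), handled
 * below.
 */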
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");