// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf always has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
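
/* A minimal illustrative hierarchy for the level rules above (hedged:
 * the tc commands, device name, handles and rates below are made-up
 * examples, not part of this file):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 30
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit
 *   tc class add dev eth0 parent 1:1 classid 1:30 htb rate 10mbit
 *
 * Here 1:10 and 1:30 are leaves (level 0) that may borrow from the
 * interior class 1:1 (level 1) when their own rate is exhausted.
 */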

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
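
/* Example usage of the two parameters above (hedged: assumes HTB is
 * built as the sch_htb module; paths follow the usual module_param
 * sysfs layout for mode 0640 parameters):
 *
 *   modprobe sch_htb htb_hysteresis=1 htb_rate_est=1
 *
 * or, at runtime:
 *
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */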

/* used internally to keep status of single class */
enum htb_cmode {
        HTB_CANT_SEND,          /* class can't send and can't borrow */
        HTB_MAY_BORROW,         /* class can't send but may borrow */
        HTB_CAN_SEND            /* class can send */
};

struct htb_prio {
        union {
                struct rb_root  row;
                struct rb_root  feed;
        };
        struct rb_node  *ptr;
        /* When class changes from state 1->2 and disconnects from
         * parent's feed then we lose the ptr value and start from the
         * first child again. Here we store the classid of the
         * last valid ptr (used when ptr is NULL).
         */
        u32             last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
        struct Qdisc_class_common common;
        struct psched_ratecfg   rate;
        struct psched_ratecfg   ceil;
        s64                     buffer, cbuffer;/* token bucket depth/rate */
        s64                     mbuffer;        /* max wait time */
        u32                     prio;           /* these two are used only by leaves... */
        int                     quantum;        /* but stored for parent-to-leaf return */

        struct tcf_proto __rcu  *filter_list;   /* class attached filters */
        struct tcf_block        *block;
        int                     filter_cnt;

        int                     level;          /* our level (see above) */
        unsigned int            children;
        struct htb_class        *parent;        /* parent class */

        struct net_rate_estimator __rcu *rate_est;

        /*
         * Written often fields
         */
        struct gnet_stats_basic_packed bstats;
        struct tc_htb_xstats    xstats; /* our special stats */

        /* token bucket parameters */
        s64                     tokens, ctokens;/* current number of tokens */
        s64                     t_c;            /* checkpoint time */

        union {
                struct htb_class_leaf {
                        int             deficit[TC_HTB_MAXDEPTH];
                        struct Qdisc    *q;
                } leaf;
                struct htb_class_inner {
                        struct htb_prio clprio[TC_HTB_NUMPRIO];
                } inner;
        };
        s64                     pq_key;

        int                     prio_activity;  /* for which prios are we active */
        enum htb_cmode          cmode;          /* current mode of the class */
        struct rb_node          pq_node;        /* node for event queue */
        struct rb_node          node[TC_HTB_NUMPRIO];   /* node for self or feed tree */

        unsigned int drops ____cacheline_aligned_in_smp;
        unsigned int            overlimits;
};

struct htb_level {
        struct rb_root  wait_pq;
        struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
        struct Qdisc_class_hash clhash;
        int                     defcls;         /* class where unclassified flows go to */
        int                     rate2quantum;   /* quant = rate / rate2quantum */

        /* filters for qdisc itself */
        struct tcf_proto __rcu  *filter_list;
        struct tcf_block        *block;

#define HTB_WARN_TOOMANYEVENTS  0x1
        unsigned int            warned; /* only one warning */
        int                     direct_qlen;
        struct work_struct      work;

        /* non shaped skbs; let them go directly thru */
        struct qdisc_skb_head   direct_queue;
        u32                     direct_pkts;
        u32                     overlimits;

        struct qdisc_watchdog   watchdog;

        s64                     now;    /* cached dequeue time */

        /* time of nearest event per level (row) */
        s64                     near_ev_cache[TC_HTB_MAXDEPTH];

        int                     row_mask[TC_HTB_MAXDEPTH];

        struct htb_level        hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, handle);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
        return (unsigned long)htb_find(handle, sch);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful, then finish and return direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)
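
/* Illustrative example of the priority-based selection described above
 * (hedged: handles are made up): with a root qdisc "1: htb default 20",
 * a socket whose SO_PRIORITY equals the qdisc handle 1:0 (0x10000) has
 * its skbs returned as HTB_DIRECT, while SO_PRIORITY equal to a leaf
 * classid such as 1:20 (0x10020) enqueues to that leaf without
 * consulting any filter.
 */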

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow to select class by setting skb->priority to valid classid;
         * note that nfmark can be used too by attaching filter fw with no
         * rules in it
         */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
        cl = htb_find(skb->priority, sch);
        if (cl) {
                if (cl->level == 0)
                        return cl;
                /* Start with inner filter chain if a non-leaf class is selected */
                tcf = rcu_dereference_bh(cl->filter_list);
        } else {
                tcf = rcu_dereference_bh(q->filter_list);
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (void *)res.class;
                if (!cl) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
                        cl = htb_find(res.classid, sch);
                        if (!cl)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl;      /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = rcu_dereference_bh(cl->filter_list);
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (!cl || cl->level)
                return HTB_DIRECT;      /* bad default .. this is safe bet */
        return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
                               struct htb_class *cl, int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);

                if (cl->common.classid > c->common.classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode at time cl->pq_key (nanoseconds on the
 * q->now clock). Make sure that the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, s64 delay)
{
        struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
                cl->pq_key++;

        /* update the nearest event cache */
        if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key, *n is set to NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                                        struct htb_class *cl, int mask)
{
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
        }
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
        if (RB_EMPTY_NODE(rb)) {
                WARN_ON(1);
        } else {
                rb_erase(rb, root);
                RB_CLEAR_NODE(rb);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
                                             struct htb_class *cl, int mask)
{
        int m = 0;
        struct htb_level *hlevel = &q->hlevel[cl->level];

        while (mask) {
                int prio = ffz(~mask);
                struct htb_prio *hprio = &hlevel->hprio[prio];

                mask &= ~(1 << prio);
                if (hprio->ptr == cl->node + prio)
                        htb_next_rb_node(&hprio->ptr);

                htb_safe_rb_erase(cl->node + prio, &hprio->row);
                if (!hprio->row.rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->inner.clprio[prio].feed.rb_node)
                                /* parent already has its feed in use, so
                                 * reset the bit in mask; the parent is
                                 * already ok
                                 */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->inner.clprio[prio].ptr == cl->node + prio) {
                                /* we are removing a child which is pointed
                                 * to from the parent feed - forget the
                                 * pointer but remember the classid
                                 */
                                p->inner.clprio[prio].last_ptr_id = cl->common.classid;
                                p->inner.clprio[prio].ptr = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio,
                                          &p->inner.clprio[prio].feed);

                        if (!p->inner.clprio[prio].feed.rb_node)
                                mask |= 1 << prio;
                }

                p->prio_activity &= ~mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
        else
                return 0;
}

static inline s64 htb_hiwater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
        else
                return 0;
}
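
/* Worked example of the watermarks above (they only differ from 0 when
 * htb_hysteresis is set): a class already in HTB_CAN_SEND keeps that
 * mode while cl->tokens stays >= -cl->buffer, and a class already in
 * HTB_CANT_SEND must refill its ctokens all the way back to 0 before
 * leaving it. The asymmetric thresholds keep a borderline class from
 * flipping mode on nearly every packet.
 */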

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
        s64 toks;

        if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                *diff = -toks;
                return HTB_CANT_SEND;
        }

        if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                return HTB_CAN_SEND;

        *diff = -toks;
        return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed lists linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl, diff);

        if (new_mode == cl->cmode)
                return;

        if (new_mode == HTB_CANT_SEND) {
                cl->overlimits++;
                q->overlimits++;
        }

        if (cl->prio_activity) {        /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q, cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q, cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on an already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

        if (!cl->prio_activity) {
                cl->prio_activity = 1 << cl->prio;
                htb_activate_prios(q, cl);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words, it can't be called
 * with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(!cl->prio_activity);

        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        int ret;
        unsigned int len = qdisc_pkt_len(skb);
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        __qdisc_enqueue_tail(skb, &q->direct_queue);
                        q->direct_pkts++;
                } else {
                        return qdisc_drop(skb, sch, to_free);
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
#endif
        } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
                                        to_free)) != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret)) {
                        qdisc_qstats_drop(sch);
                        cl->drops++;
                }
                return ret;
        } else {
                htb_activate(q, cl);
        }

        sch->qstats.backlog += len;
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
        s64 toks = diff + cl->tokens;

        if (toks > cl->buffer)
                toks = cl->buffer;
        toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
        s64 toks = diff + cl->ctokens;

        if (toks > cl->cbuffer)
                toks = cl->cbuffer;
        toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->ctokens = toks;
}
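
/* Illustrative arithmetic for the two helpers above (hedged example
 * numbers): at a configured rate of 125000 bytes/s (1 Mbit/s),
 * psched_l2t_ns() charges a 1500-byte packet 1500/125000 s = 12 ms
 * worth of tokens. The running balance is clamped from above by
 * cl->buffer (the burst allowance) and from below by -cl->mbuffer, so
 * a long-idle class cannot bank an unbounded surplus and a
 * long-overlimit class cannot sink into unbounded debt.
 */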

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
{
        int bytes = qdisc_pkt_len(skb);
        enum htb_cmode old_mode;
        s64 diff;

        while (cl) {
                diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
                        htb_accnt_tokens(cl, bytes, diff);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff;     /* we moved t_c; update tokens */
                }
                htb_accnt_ctokens(cl, bytes, diff);
                cl->t_c = q->now;

                old_mode = cl->cmode;
                diff = 0;
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }

                /* update basic stats except for leaves which are already updated */
                if (cl->level)
                        bstats_update(&cl->bstats, skb);

                cl = cl->parent;
        }
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * time of the next pending event (0 for no event in pq, q->now for too
 * many events). Note: only events with cl->pq_key <= q->now are applied.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
                         unsigned long start)
{
        /* don't run for longer than 2 jiffies; 2 is used instead of
         * 1 to simplify things when jiffy is going to be incremented
         * too soon
         */
        unsigned long stop_at = start + 2;
        struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
                s64 diff;
                struct rb_node *p = rb_first(wait_pq);

                if (!p)
                        return 0;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (cl->pq_key > q->now)
                        return cl->pq_key;

                htb_safe_rb_erase(p, wait_pq);
                diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }

        /* too much load - let's continue after a break for scheduling */
        if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
                pr_warn("htb: too many events!\n");
                q->warned |= HTB_WARN_TOOMANYEVENTS;
        }

        return q->now;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
 * NULL if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl =
                        rb_entry(n, struct htb_class, node[prio]);

                if (id > cl->common.classid) {
                        n = n->rb_right;
                } else if (id < cl->common.classid) {
                        r = n;
                        n = n->rb_left;
                } else {
                        return n;
                }
        }
        return r;
}
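
/* Usage note for htb_id_find_next_upper(): it is a plain rb-tree
 * successor search keyed by classid. E.g. (made-up ids) for a tree
 * holding {2, 5, 9}, a lookup with id 5 returns the node for 5, id 6
 * returns the node for 9, and id 10 returns NULL.
 */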

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;

        BUG_ON(!hprio->row.rb_node);
        sp->root = hprio->row.rb_node;
        sp->pptr = &hprio->ptr;
        sp->pid = &hprio->last_ptr_id;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                         * the original or next ptr
                         */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now, so remove this hint as it
                                 * can become out of date quickly
                                 */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                if (!*sp->pptr) {
                                        WARN_ON(1);
                                        return NULL;
                                }
                                htb_next_rb_node(sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        struct htb_prio *clp;

                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
                        clp = &cl->inner.clprio[prio];
                        (++sp)->root = clp->feed.rb_node;
                        sp->pptr = &clp->ptr;
                        sp->pid = &clp->last_ptr_id;
                }
        }
        WARN_ON(1);
        return NULL;
}
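
/* Descriptive note on the walk above: it keeps one {root, pptr, pid}
 * frame per tree level. It follows each inner class's cached round-robin
 * pointer downward (recovering it from last_ptr_id when the pointed-to
 * class went away in the meantime), rewinds to the leftmost node when a
 * feed is exhausted, and pops one frame to advance the parent's pointer.
 * The 65535-iteration bound only guards against a corrupted tree.
 */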

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
                                        const int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
        struct htb_level *hlevel = &q->hlevel[level];
        struct htb_prio *hprio = &hlevel->hprio[prio];

        /* look initial class up in the row */
        start = cl = htb_lookup_leaf(hprio, prio);

        do {
next:
                if (unlikely(!cl))
                        return NULL;

                /* class can be empty - it is unlikely but can be true if leaf
                 * qdisc drops packets in enqueue routine or if someone used
                 * graft operation on the leaf since last dequeue;
                 * simply deactivate and skip such class
                 */
                if (unlikely(cl->leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf(hprio, prio);

                        if (cl == start)        /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                skb = cl->leaf.q->dequeue(cl->leaf.q);
                if (likely(skb != NULL))
                        break;

                qdisc_warn_nonwc("htb", cl->leaf.q);
                htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
                                         &q->hlevel[0].hprio[prio].ptr);
                cl = htb_lookup_leaf(hprio, prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                bstats_update(&cl->bstats, skb);
                cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
                if (cl->leaf.deficit[level] < 0) {
                        cl->leaf.deficit[level] += cl->quantum;
                        htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
                                                 &q->hlevel[0].hprio[prio].ptr);
                }
                /* this used to be after charge_class but this constellation
                 * gives us slightly better performance
                 */
                if (!cl->leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
        }
        return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        s64 next_event;
        unsigned long start_at;

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __qdisc_dequeue_head(&q->direct_queue);
        if (skb != NULL) {
ok:
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen)
                goto fin;
        q->now = ktime_get_ns();
        start_at = jiffies;

        next_event = q->now + 5LLU * NSEC_PER_SEC;

        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                s64 event = q->near_ev_cache[level];

                if (q->now >= event) {
                        event = htb_do_events(q, level, start_at);
                        if (!event)
                                event = q->now + NSEC_PER_SEC;
                        q->near_ev_cache[level] = event;
                }

                if (next_event > event)
                        next_event = event;

                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);

                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL))
                                goto ok;
                }
        }
        if (likely(next_event > q->now))
                qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
        else
                schedule_work(&q->work);
fin:
        return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->level)
                                memset(&cl->inner, 0, sizeof(cl->inner));
                        else {
                                if (cl->leaf.q)
                                        qdisc_reset(cl->leaf.q);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
                }
        }
        qdisc_watchdog_cancel(&q->watchdog);
        __qdisc_reset_queue(&q->direct_queue);
        memset(q->hlevel, 0, sizeof(q->hlevel));
        memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
        [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
        [TCA_HTB_INIT]  = { .len = sizeof(struct tc_htb_glob) },
        [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
        [TCA_HTB_RATE64] = { .type = NLA_U64 },
        [TCA_HTB_CEIL64] = { .type = NLA_U64 },
};

static void htb_work_func(struct work_struct *work)
{
        struct htb_sched *q = container_of(work, struct htb_sched, work);
        struct Qdisc *sch = q->watchdog.qdisc;

        rcu_read_lock();
        __netif_schedule(qdisc_root(sch));
        rcu_read_unlock();
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_glob *gopt;
        int err;

        qdisc_watchdog_init(&q->watchdog, sch);
        INIT_WORK(&q->work, htb_work_func);

        if (!opt)
                return -EINVAL;

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;

        err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (!tb[TCA_HTB_INIT])
                return -EINVAL;

        gopt = nla_data(tb[TCA_HTB_INIT]);
        if (gopt->version != HTB_VER >> 16)
                return -EINVAL;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;

        qdisc_skb_head_init(&q->direct_queue);

        if (tb[TCA_HTB_DIRECT_QLEN])
                q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
        else
                q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}
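
/* Example qdisc setup exercising htb_init() (hedged: illustrative tc
 * usage, not part of this file):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10 \
 *        direct_qlen 32
 *
 * "default 20" arrives as gopt->defcls, "r2q 10" as gopt->rate2quantum,
 * and "direct_qlen 32" via the TCA_HTB_DIRECT_QLEN attribute; when the
 * attribute is absent, the device tx_queue_len is used instead.
 */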

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_htb_glob gopt;

        sch->qstats.overlimits = q->overlimits;
        /* It's safe to not acquire qdisc lock. As we hold RTNL,
         * no change can happen on the qdisc parameters.
         */

        gopt.direct_pkts = q->direct_pkts;
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = 0;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
            nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct htb_class *cl = (struct htb_class *)arg;
        struct nlattr *nest;
        struct tc_htb_opt opt;

        /* It's safe to not acquire qdisc lock. As we hold RTNL,
         * no change can happen on the class parameters.
         */
        tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        if (!cl->level && cl->leaf.q)
                tcm->tcm_info = cl->leaf.q->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        memset(&opt, 0, sizeof(opt));

        psched_ratecfg_getrate(&opt.rate, &cl->rate);
        opt.buffer = PSCHED_NS2TICKS(cl->buffer);
        psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
        opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
        opt.level = cl->level;
        if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
            nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
                              TCA_HTB_PAD))
                goto nla_put_failure;
        if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
            nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
                              TCA_HTB_PAD))
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
        struct htb_class *cl = (struct htb_class *)arg;
        struct gnet_stats_queue qs = {
                .drops = cl->drops,
                .overlimits = cl->overlimits,
        };
        __u32 qlen = 0;

        if (!cl->level && cl->leaf.q)
                qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);

        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                    INT_MIN, INT_MAX);
        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
                                     INT_MIN, INT_MAX);

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl->level)
                return -EINVAL;
        if (new == NULL &&
            (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                     cl->common.classid, extack)) == NULL)
                return -ENOBUFS;

        *old = qdisc_replace(sch, new, &cl->leaf.q);
        return 0;
}
1160*4882a593Smuzhiyun
htb_leaf(struct Qdisc * sch,unsigned long arg)1161*4882a593Smuzhiyun static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun struct htb_class *cl = (struct htb_class *)arg;
1164*4882a593Smuzhiyun return !cl->level ? cl->leaf.q : NULL;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun
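/* Called when the class's child qdisc has drained to zero packets: the class
 * can no longer feed anything, so take it off the active lists.
 */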
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

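/* Deleting the last child of an inner class turns the parent back into a
 * leaf: drop it from the wait queue, wire up the new elementary qdisc and
 * refill its token buckets so it starts out in HTB_CAN_SEND.
 */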
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->inner, 0, sizeof(parent->inner));
	parent->leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

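/* Release everything a class still owns: its leaf qdisc (leaves only), the
 * rate estimator and the filter block, then the class itself.
 */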
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->leaf.q);
		qdisc_put(cl->leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class calls below,
	 * and surprisingly it worked in 2.4. But it must precede them
	 * because a filter needs its target class alive to be able to call
	 * unbind_filter on it (without an Oops).
	 */
	tcf_block_put(q->block);

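	/* Destruction is two-pass: first drop every per-class filter block
	 * while all classes are still alive, then free the classes
	 * themselves.
	 */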
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}

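/* Delete a single class. Classes with children or with filters still bound
 * to them cannot go away; when the victim is its parent's last child, the
 * parent is demoted back to a leaf with a freshly created default qdisc.
 */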
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	 * the tc subsys guarantee us that in htb_destroy it holds no class
	 * refs so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid,
					  NULL);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level)
		qdisc_purge_queue(cl->leaf.q);

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

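/* Create a new class or change an existing one. A typical iproute2
 * invocation that ends up here (illustrative only):
 *
 *   tc class add dev eth0 parent 1: classid 1:10 htb rate 100mbit ceil 200mbit
 *
 * with rate/ceil/burst packed into TCA_HTB_PARMS and, for rates that do not
 * fit 32 bits, the 64-bit TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes.
 */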
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct Qdisc *parent_qdisc = NULL;
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;
	int warn = 0;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keep backward compatibility with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
					      NULL));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
					      NULL));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				tcf_block_put(cl->block);
				kfree(cl);
				goto failure;
			}
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
		 * so it can't be done inside sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from event list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}
		/* the leaf (this class) needs an elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

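	/* TCA_HTB_RATE64/TCA_HTB_CEIL64 carry rates that do not fit the
	 * legacy 32-bit tc_ratespec fields; when absent, the 32-bit values
	 * from TCA_HTB_PARMS are used.
	 */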
	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* there used to be a nasty bug here, we have to check that the node
	 * is really a leaf before changing cl->leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

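		/* Worked example (assuming the common r2q default of 10):
		 * rate 100mbit = 12500000 B/s gives 12500000 / 10 = 1250000,
		 * which the check below caps at 200000 and warns about.
		 */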
		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
			cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}

static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, it is OK - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

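/* Walk all classes for dumping; arg->skip and arg->count implement the
 * usual resumable iteration over the class hash.
 */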
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.find		=	htb_search,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_block	=	htb_tcf_block,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");