/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;
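
/* data[] maps a packet size (scaled down by tc_ratespec.cell_log) to the
 * time needed to transmit it at the configured rate; see e.g. qdisc_l2t()
 * in net/sched/sch_htb.c for the lookup.
 */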
struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff * (*dequeue)(struct Qdisc *sch);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct hlist_node hash;
	u32 handle;
	u32 parent;

	struct netdev_queue *dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	int pad;
	refcount_t refcnt;
	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t running;
	struct gnet_stats_queue qstats;
	unsigned long state;
	struct Qdisc *next_sched;
	struct sk_buff_head skb_bad_txq;

	spinlock_t busylock ____cacheline_aligned_in_smp;
	spinlock_t seqlock;

	/* for NOLOCK qdisc, true if there are no enqueued skbs */
	bool empty;
	struct rcu_head rcu;

	ANDROID_KABI_RESERVE(1);

	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}
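
/* Note: for NOLOCK/percpu-stats qdiscs, ->empty (read below) is a racy
 * hint: it is cleared in qdisc_run_begin() and set again once the queue
 * drains, so readers may briefly observe a stale value.
 */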
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return READ_ONCE(qdisc->empty);
	return !READ_ONCE(qdisc->q.qlen);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			goto nolock_empty;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		if (!spin_trylock(&qdisc->seqlock))
			return false;

nolock_empty:
		WRITE_ONCE(qdisc->empty, false);
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantic. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state))) {
			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
			__netif_schedule(qdisc);
		}
	}
}
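
/* Typical caller-side pattern (a sketch; the real loop lives in
 * __qdisc_run() and __dev_xmit_skb()):
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets ...
 *		qdisc_run_end(q);
 *	}
 *
 * Only one CPU wins qdisc_run_begin(); for a NOLOCK qdisc, losers set
 * __QDISC_STATE_MISSED so the owner knows to reschedule the qdisc.
 */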

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
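
/* Sketch of how a dequeue path might combine the two helpers above
 * (hypothetical caller; the in-tree user is the bulk-dequeue logic in
 * net/sched/sch_generic.c):
 *
 *	if (qdisc_may_bulk(q)) {
 *		int budget = qdisc_avail_bulklimit(txq);
 *
 *		while (budget > 0 && (nskb = q->dequeue(q)) != NULL)
 *			budget -= qdisc_pkt_len(nskb);
 *	}
 */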

struct Qdisc_class_ops {
	unsigned int flags;
	/* Child qdisc manipulation */
	struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **,
		     struct netlink_ext_ack *extack);
	struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*find)(struct Qdisc *, u32 classid);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *,
		      struct netlink_ext_ack *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block * (*tcf_block)(struct Qdisc *sch,
					unsigned long arg,
					struct netlink_ext_ack *extack);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg*);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);

	ANDROID_KABI_RESERVE(1);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;
	unsigned int static_flags;

	int (*enqueue)(struct sk_buff *skb,
		       struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff * (*dequeue)(struct Qdisc *);
	struct sk_buff * (*peek)(struct Qdisc *);

	int (*init)(struct Qdisc *sch, struct nlattr *arg,
		    struct netlink_ext_ack *extack);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *sch,
		      struct nlattr *arg,
		      struct netlink_ext_ack *extack);
	void (*attach)(struct Qdisc *sch);
	int (*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void (*ingress_block_set)(struct Qdisc *sch,
				  u32 block_index);
	void (*egress_block_set)(struct Qdisc *sch,
				 u32 block_index);
	u32 (*ingress_block_get)(struct Qdisc *sch);
	u32 (*egress_block_get)(struct Qdisc *sch);

	struct module *owner;

	ANDROID_KABI_RESERVE(1);
};


struct tcf_result {
	union {
		struct {
			unsigned long class;
			u32 classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto*);
	void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack);

	void* (*get)(struct tcf_proto*, u32 handle);
	void (*put)(struct tcf_proto *tp, void *f);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto*, unsigned long,
		      u32 handle, struct nlattr **,
		      void **, bool, bool,
		      struct netlink_ext_ack *);
	int (*delete)(struct tcf_proto *tp, void *arg,
		      bool *last, bool rtnl_held,
		      struct netlink_ext_ack *);
	bool (*delete_empty)(struct tcf_proto *tp);
	void (*walk)(struct tcf_proto *tp,
		     struct tcf_walker *arg, bool rtnl_held);
	int (*reoffload)(struct tcf_proto *tp, bool add,
			 flow_setup_cb_t *cb, void *cb_priv,
			 struct netlink_ext_ack *extack);
	void (*hw_add)(struct tcf_proto *tp,
		       void *type_data);
	void (*hw_del)(struct tcf_proto *tp,
		       void *type_data);
	void (*bind_class)(void *, u32, unsigned long,
			   void *, unsigned long);
	void * (*tmplt_create)(struct net *net,
			       struct tcf_chain *chain,
			       struct nlattr **tca,
			       struct netlink_ext_ack *extack);
	void (*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int (*dump)(struct net*, struct tcf_proto*, void *,
		    struct sk_buff *skb, struct tcmsg*,
		    bool);
	int (*terse_dump)(struct net *net,
			  struct tcf_proto *tp, void *fh,
			  struct sk_buff *skb,
			  struct tcmsg *t, bool rtnl_held);
	int (*tmplt_dump)(struct sk_buff *skb,
			  struct net *net,
			  void *tmplt_priv);

	struct module *owner;
	int flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;

	/* called under RCU BH lock */
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	void *data;
	const struct tcf_proto_ops *ops;
	struct tcf_chain *chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t lock;
	bool deleting;
	refcount_t refcnt;
	struct rcu_head rcu;
	struct hlist_node destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int pkt_len;
		u16 slave_dev_queue_mapping;
		u16 tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
	u16 mru;
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}
#else
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return true;
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
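
/* Example (a sketch, not from this file): a writer holding
 * filter_chain_lock may use tcf_chain_dereference() instead of an RCU
 * read-side section, and lockdep verifies the lock is actually held:
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	tp = tcf_chain_dereference(chain->filter_chain, chain);
 *	...
 *	mutex_unlock(&chain->filter_chain_lock);
 */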

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
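
/* Queues below real_num_tx_queues get the device's default qdisc;
 * inactive queues beyond it fall back to pfifo_fast.
 */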
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

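/* Fold a 32-bit classid down to a table index; mask is hashsize - 1. */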
static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
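
/* Caller-side contract (a sketch; __dev_xmit_skb() is the real caller):
 * packets the qdisc refuses are chained onto *to_free so they can be
 * freed after the qdisc lock is released:
 *
 *	struct sk_buff *to_free = NULL;
 *
 *	rc = qdisc_enqueue(skb, q, &to_free);
 *	...
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */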

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };
	__u32 len = qdisc_qlen_sum(sch);

	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
	*qlen = qstats.qlen;
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

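/* Variant of __qdisc_drop() for the case where skb heads a list whose
 * tail is reachable via skb->prev: the whole list is queued for
 * deferred freeing.
 */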
static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
1164*4882a593Smuzhiyun
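/* Illustrative sketch (not part of the original header): a shaper-style qdisc
 * whose child implements ->peek() with qdisc_peek_dequeued() inspects the head
 * packet first and only commits the dequeue, via qdisc_dequeue_peeked(), once
 * its rate budget allows the packet to leave.  The private struct, the child
 * pointer and the rate/watchdog helpers below are hypothetical:
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb = q->child->ops->peek(q->child);
 *
 *		if (!skb)
 *			return NULL;
 *		if (!example_rate_allows(q, qdisc_pkt_len(skb))) {
 *			example_watchdog_schedule(q);	// retry later
 *			return NULL;
 *		}
 *		skb = qdisc_dequeue_peeked(q->child);
 *		if (unlikely(!skb))
 *			return NULL;
 *		qdisc_qstats_backlog_dec(sch, skb);
 *		sch->q.qlen--;
 *		qdisc_bstats_update(sch, skb);
 *		return skb;
 *	}
 */
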
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, so it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

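/* Illustrative sketch (not part of the original header): qdisc_replace() is
 * the usual building block for a classful qdisc's ->graft() operation; it
 * swaps the child under sch_tree_lock() and hands the old child back for the
 * caller to release.  The private struct and its qdisc field are hypothetical:
 *
 *	static int example_graft(struct Qdisc *sch, unsigned long arg,
 *				 struct Qdisc *new, struct Qdisc **old,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */
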
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

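/* Illustrative sketch (not part of the original header): an enqueue path
 * picks the drop helper matching its stats mode; lockless qdiscs running with
 * TCQ_F_CPUSTATS use qdisc_drop_cpu(), others use qdisc_drop().  The
 * ring-buffer producer below is hypothetical; note that pkt_len is read
 * before the skb is handed off, since it may be consumed concurrently:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		unsigned int pkt_len = qdisc_pkt_len(skb);
 *
 *		if (example_ring_produce(sch, skb)) {
 *			if (qdisc_is_percpu_stats(sch))
 *				return qdisc_drop_cpu(skb, sch, to_free);
 *			return qdisc_drop(skb, sch, to_free);
 *		}
 *		qdisc_update_stats_at_enqueue(sch, pkt_len);
 *		return NET_XMIT_SUCCESS;
 *	}
 */
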
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

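/* Worked example (not from the original header): with cell_log = 3 (8 byte
 * cells), cell_align = 0 and overhead = 0, a 1000 byte packet selects
 * slot = 1000 >> 3 = 125, so the result is rtab->data[125], the precomputed
 * transmission time for that size class.  For slot values above 255 the code
 * approximates: data[255] is charged once per full 256-slot span (slot >> 8
 * of them) plus data[slot & 0xFF] for the remainder.
 */
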
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

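/* Worked example (not from the original header): psched_ratecfg_precompute()
 * chooses mult and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps.  At 1 Gbit/s (rate_bytes_ps =
 * 125000000), a 1500 byte frame therefore costs about
 * 1500 * 1000000000 / 125000000 = 12000 ns.  The ATM branch first rounds the
 * length up to whole 48 byte payload cells and charges 53 bytes per cell,
 * accounting for the 5 byte header each ATM cell carries on the wire.
 */
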
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

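/* Illustrative sketch (not part of the original header): a shaping qdisc
 * typically precomputes its rate at ->init()/->change() time and converts it
 * back when dumping its configuration.  TCA_EXAMPLE_PARMS and the private
 * struct fields are hypothetical:
 *
 *	// ->init()/->change(): q->rate is a struct psched_ratecfg
 *	psched_ratecfg_precompute(&q->rate, &opt->rate, rate64);
 *
 *	// ->dump(): convert back into the legacy 32bit tc_ratespec
 *	psched_ratecfg_getrate(&opt.rate, &q->rate);
 *	if (nla_put(skb, TCA_EXAMPLE_PARMS, sizeof(opt), &opt))
 *		goto nla_put_failure;
 */
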
/* Mini Qdisc serves the specific needs of the ingress/clsact Qdiscs.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

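/* Illustrative sketch (not part of the original header): the pair works as a
 * double buffer.  An ingress-style qdisc embeds a mini_Qdisc_pair in its
 * private data, points p_miniq at the RCU slot the fast path dereferences,
 * and flips between miniq1 and miniq2 via mini_qdisc_pair_swap() whenever the
 * filter chain head changes.  The private struct and the miniq slot shown
 * here are hypothetical:
 *
 *	// ->init(): wire the pair to the fast-path pointer
 *	mini_qdisc_pair_init(&q->miniqp, sch, &q->p_miniq);
 *
 *	// filter chain head change callback:
 *	static void example_chain_head_change(struct tcf_proto *tp_head,
 *					      void *priv)
 *	{
 *		struct mini_Qdisc_pair *miniqp = priv;
 *
 *		mini_qdisc_pair_swap(miniqp, tp_head);
 *	}
 */
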
static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
	return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
}

#endif