xref: /OK3568_Linux_fs/kernel/include/uapi/linux/pkt_sched.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2*4882a593Smuzhiyun #ifndef __LINUX_PKT_SCHED_H
3*4882a593Smuzhiyun #define __LINUX_PKT_SCHED_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/const.h>
6*4882a593Smuzhiyun #include <linux/types.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun /* Logical priority bands not depending on specific packet scheduler.
9*4882a593Smuzhiyun    Every scheduler will map them to real traffic classes, if it has
10*4882a593Smuzhiyun    no more precise mechanism to classify packets.
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun    These numbers have no special meaning, though their coincidence
13*4882a593Smuzhiyun    with obsolete IPv6 values is not occasional :-). New IPv6 drafts
14*4882a593Smuzhiyun    preferred full anarchy inspired by diffserv group.
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun    Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
17*4882a593Smuzhiyun    class, actually, as rule it will be handled with more care than
18*4882a593Smuzhiyun    filler or even bulk.
19*4882a593Smuzhiyun  */
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define TC_PRIO_BESTEFFORT		0
22*4882a593Smuzhiyun #define TC_PRIO_FILLER			1
23*4882a593Smuzhiyun #define TC_PRIO_BULK			2
24*4882a593Smuzhiyun #define TC_PRIO_INTERACTIVE_BULK	4
25*4882a593Smuzhiyun #define TC_PRIO_INTERACTIVE		6
26*4882a593Smuzhiyun #define TC_PRIO_CONTROL			7
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #define TC_PRIO_MAX			15
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun /* Generic queue statistics, available for all the elements.
31*4882a593Smuzhiyun    Particular schedulers may have also their private records.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun struct tc_stats {
35*4882a593Smuzhiyun 	__u64	bytes;			/* Number of enqueued bytes */
36*4882a593Smuzhiyun 	__u32	packets;		/* Number of enqueued packets	*/
37*4882a593Smuzhiyun 	__u32	drops;			/* Packets dropped because of lack of resources */
38*4882a593Smuzhiyun 	__u32	overlimits;		/* Number of throttle events when this
39*4882a593Smuzhiyun 					 * flow goes out of allocated bandwidth */
40*4882a593Smuzhiyun 	__u32	bps;			/* Current flow byte rate */
41*4882a593Smuzhiyun 	__u32	pps;			/* Current flow packet rate */
42*4882a593Smuzhiyun 	__u32	qlen;			/* Current queue length */
43*4882a593Smuzhiyun 	__u32	backlog;		/* Current backlog */
44*4882a593Smuzhiyun };
45*4882a593Smuzhiyun 
/* Rate-estimator configuration attached to a qdisc/class. */
46*4882a593Smuzhiyun struct tc_estimator {
47*4882a593Smuzhiyun 	signed char	interval;
48*4882a593Smuzhiyun 	unsigned char	ewma_log;
49*4882a593Smuzhiyun };
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /* "Handles"
52*4882a593Smuzhiyun    ---------
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun     All the traffic control objects have 32bit identifiers, or "handles".
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun     They can be considered as opaque numbers from user API viewpoint,
57*4882a593Smuzhiyun     but actually they always consist of two fields: major and
58*4882a593Smuzhiyun     minor numbers, which are interpreted by kernel specially,
59*4882a593Smuzhiyun     that may be used by applications, though not recommended.
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun     F.e. qdisc handles always have minor number equal to zero,
62*4882a593Smuzhiyun     classes (or flows) have major equal to parent qdisc major, and
63*4882a593Smuzhiyun     minor uniquely identifying class inside qdisc.
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun     Macros to manipulate handles:
66*4882a593Smuzhiyun  */
67*4882a593Smuzhiyun 
/* A handle is 16-bit major | 16-bit minor. */
68*4882a593Smuzhiyun #define TC_H_MAJ_MASK (0xFFFF0000U)
69*4882a593Smuzhiyun #define TC_H_MIN_MASK (0x0000FFFFU)
70*4882a593Smuzhiyun #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
71*4882a593Smuzhiyun #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
72*4882a593Smuzhiyun #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
73*4882a593Smuzhiyun 
/* Well-known handle values. */
74*4882a593Smuzhiyun #define TC_H_UNSPEC	(0U)
75*4882a593Smuzhiyun #define TC_H_ROOT	(0xFFFFFFFFU)
76*4882a593Smuzhiyun #define TC_H_INGRESS    (0xFFFFFFF1U)
77*4882a593Smuzhiyun #define TC_H_CLSACT	TC_H_INGRESS
78*4882a593Smuzhiyun 
/* Reserved values in the minor-number (TC_H_MIN_*) space. */
79*4882a593Smuzhiyun #define TC_H_MIN_PRIORITY	0xFFE0U
80*4882a593Smuzhiyun #define TC_H_MIN_INGRESS	0xFFF2U
81*4882a593Smuzhiyun #define TC_H_MIN_EGRESS		0xFFF3U
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
84*4882a593Smuzhiyun enum tc_link_layer {
85*4882a593Smuzhiyun 	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
86*4882a593Smuzhiyun 	TC_LINKLAYER_ETHERNET,
87*4882a593Smuzhiyun 	TC_LINKLAYER_ATM,
88*4882a593Smuzhiyun };
89*4882a593Smuzhiyun #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
90*4882a593Smuzhiyun 
/* Rate specification used by rate-based qdiscs. */
91*4882a593Smuzhiyun struct tc_ratespec {
92*4882a593Smuzhiyun 	unsigned char	cell_log;
93*4882a593Smuzhiyun 	__u8		linklayer; /* lower 4 bits */
94*4882a593Smuzhiyun 	unsigned short	overhead;
95*4882a593Smuzhiyun 	short		cell_align;
96*4882a593Smuzhiyun 	unsigned short	mpu;
97*4882a593Smuzhiyun 	__u32		rate;
98*4882a593Smuzhiyun };
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun #define TC_RTAB_SIZE	1024
101*4882a593Smuzhiyun 
/* Size-table parameters. */
102*4882a593Smuzhiyun struct tc_sizespec {
103*4882a593Smuzhiyun 	unsigned char	cell_log;
104*4882a593Smuzhiyun 	unsigned char	size_log;
105*4882a593Smuzhiyun 	short		cell_align;
106*4882a593Smuzhiyun 	int		overhead;
107*4882a593Smuzhiyun 	unsigned int	linklayer;
108*4882a593Smuzhiyun 	unsigned int	mpu;
109*4882a593Smuzhiyun 	unsigned int	mtu;
110*4882a593Smuzhiyun 	unsigned int	tsize;
111*4882a593Smuzhiyun };
112*4882a593Smuzhiyun 
/* Netlink attributes for size tables (STAB). */
113*4882a593Smuzhiyun enum {
114*4882a593Smuzhiyun 	TCA_STAB_UNSPEC,
115*4882a593Smuzhiyun 	TCA_STAB_BASE,
116*4882a593Smuzhiyun 	TCA_STAB_DATA,
117*4882a593Smuzhiyun 	__TCA_STAB_MAX
118*4882a593Smuzhiyun };
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun #define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun /* FIFO section */
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun struct tc_fifo_qopt {
125*4882a593Smuzhiyun 	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
126*4882a593Smuzhiyun };
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun /* SKBPRIO section */
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /*
131*4882a593Smuzhiyun  * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
132*4882a593Smuzhiyun  * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
133*4882a593Smuzhiyun  * to map one to one the DS field of IPV4 and IPV6 headers.
134*4882a593Smuzhiyun  * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
135*4882a593Smuzhiyun  */
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun #define SKBPRIO_MAX_PRIORITY 64
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun struct tc_skbprio_qopt {
140*4882a593Smuzhiyun 	__u32	limit;		/* Queue length in packets. */
141*4882a593Smuzhiyun };
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun /* PRIO section */
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun #define TCQ_PRIO_BANDS	16
146*4882a593Smuzhiyun #define TCQ_MIN_PRIO_BANDS 2
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun struct tc_prio_qopt {
149*4882a593Smuzhiyun 	int	bands;			/* Number of bands */
150*4882a593Smuzhiyun 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
151*4882a593Smuzhiyun };
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun /* MULTIQ section */
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun struct tc_multiq_qopt {
156*4882a593Smuzhiyun 	__u16	bands;			/* Number of bands */
157*4882a593Smuzhiyun 	__u16	max_bands;		/* Maximum number of queues */
158*4882a593Smuzhiyun };
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun /* PLUG section */
161*4882a593Smuzhiyun 
/* Values for tc_plug_qopt.action below. */
162*4882a593Smuzhiyun #define TCQ_PLUG_BUFFER                0
163*4882a593Smuzhiyun #define TCQ_PLUG_RELEASE_ONE           1
164*4882a593Smuzhiyun #define TCQ_PLUG_RELEASE_INDEFINITE    2
165*4882a593Smuzhiyun #define TCQ_PLUG_LIMIT                 3
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun struct tc_plug_qopt {
168*4882a593Smuzhiyun 	/* TCQ_PLUG_BUFFER: Insert a plug into the queue and
169*4882a593Smuzhiyun 	 *  buffer any incoming packets
170*4882a593Smuzhiyun 	 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
171*4882a593Smuzhiyun 	 *   to beginning of the next plug.
172*4882a593Smuzhiyun 	 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
173*4882a593Smuzhiyun 	 *   Stop buffering packets until the next TCQ_PLUG_BUFFER
174*4882a593Smuzhiyun 	 *   command is received (just act as a pass-thru queue).
175*4882a593Smuzhiyun 	 * TCQ_PLUG_LIMIT: Increase/decrease queue size
176*4882a593Smuzhiyun 	 */
177*4882a593Smuzhiyun 	int             action;
178*4882a593Smuzhiyun 	__u32           limit;
179*4882a593Smuzhiyun };
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun /* TBF section */
182*4882a593Smuzhiyun 
/* Token Bucket Filter parameters. */
183*4882a593Smuzhiyun struct tc_tbf_qopt {
184*4882a593Smuzhiyun 	struct tc_ratespec rate;
185*4882a593Smuzhiyun 	struct tc_ratespec peakrate;
186*4882a593Smuzhiyun 	__u32		limit;
187*4882a593Smuzhiyun 	__u32		buffer;
188*4882a593Smuzhiyun 	__u32		mtu;
189*4882a593Smuzhiyun };
190*4882a593Smuzhiyun 
/* Netlink attributes for TBF. */
191*4882a593Smuzhiyun enum {
192*4882a593Smuzhiyun 	TCA_TBF_UNSPEC,
193*4882a593Smuzhiyun 	TCA_TBF_PARMS,
194*4882a593Smuzhiyun 	TCA_TBF_RTAB,
195*4882a593Smuzhiyun 	TCA_TBF_PTAB,
196*4882a593Smuzhiyun 	TCA_TBF_RATE64,
197*4882a593Smuzhiyun 	TCA_TBF_PRATE64,
198*4882a593Smuzhiyun 	TCA_TBF_BURST,
199*4882a593Smuzhiyun 	TCA_TBF_PBURST,
200*4882a593Smuzhiyun 	TCA_TBF_PAD,
201*4882a593Smuzhiyun 	__TCA_TBF_MAX,
202*4882a593Smuzhiyun };
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun #define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun /* TEQL section */
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun /* TEQL does not require any parameters */
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /* SFQ section */
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun struct tc_sfq_qopt {
214*4882a593Smuzhiyun 	unsigned	quantum;	/* Bytes per round allocated to flow */
215*4882a593Smuzhiyun 	int		perturb_period;	/* Period of hash perturbation */
216*4882a593Smuzhiyun 	__u32		limit;		/* Maximal packets in queue */
217*4882a593Smuzhiyun 	unsigned	divisor;	/* Hash divisor  */
218*4882a593Smuzhiyun 	unsigned	flows;		/* Maximal number of flows  */
219*4882a593Smuzhiyun };
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun struct tc_sfqred_stats {
222*4882a593Smuzhiyun 	__u32           prob_drop;      /* Early drops, below max threshold */
223*4882a593Smuzhiyun 	__u32           forced_drop;	/* Early drops, after max threshold */
224*4882a593Smuzhiyun 	__u32           prob_mark;      /* Marked packets, below max threshold */
225*4882a593Smuzhiyun 	__u32           forced_mark;    /* Marked packets, after max threshold */
226*4882a593Smuzhiyun 	__u32           prob_mark_head; /* Marked packets, below max threshold */
227*4882a593Smuzhiyun 	__u32           forced_mark_head;/* Marked packets, after max threshold */
228*4882a593Smuzhiyun };
229*4882a593Smuzhiyun 
/* Extended SFQ parameters; embeds the v0 struct for compatibility. */
230*4882a593Smuzhiyun struct tc_sfq_qopt_v1 {
231*4882a593Smuzhiyun 	struct tc_sfq_qopt v0;
232*4882a593Smuzhiyun 	unsigned int	depth;		/* max number of packets per flow */
233*4882a593Smuzhiyun 	unsigned int	headdrop;
234*4882a593Smuzhiyun /* SFQRED parameters */
235*4882a593Smuzhiyun 	__u32		limit;		/* HARD maximal flow queue length (bytes) */
236*4882a593Smuzhiyun 	__u32		qth_min;	/* Min average length threshold (bytes) */
237*4882a593Smuzhiyun 	__u32		qth_max;	/* Max average length threshold (bytes) */
238*4882a593Smuzhiyun 	unsigned char   Wlog;		/* log(W)		*/
239*4882a593Smuzhiyun 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
240*4882a593Smuzhiyun 	unsigned char   Scell_log;	/* cell size for idle damping */
241*4882a593Smuzhiyun 	unsigned char	flags;
242*4882a593Smuzhiyun 	__u32		max_P;		/* probability, high resolution */
243*4882a593Smuzhiyun /* SFQRED stats */
244*4882a593Smuzhiyun 	struct tc_sfqred_stats stats;
245*4882a593Smuzhiyun };
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun struct tc_sfq_xstats {
249*4882a593Smuzhiyun 	__s32		allot;
250*4882a593Smuzhiyun };
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun /* RED section */
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun enum {
255*4882a593Smuzhiyun 	TCA_RED_UNSPEC,
256*4882a593Smuzhiyun 	TCA_RED_PARMS,
257*4882a593Smuzhiyun 	TCA_RED_STAB,
258*4882a593Smuzhiyun 	TCA_RED_MAX_P,
259*4882a593Smuzhiyun 	TCA_RED_FLAGS,		/* bitfield32 */
260*4882a593Smuzhiyun 	TCA_RED_EARLY_DROP_BLOCK, /* u32 */
261*4882a593Smuzhiyun 	TCA_RED_MARK_BLOCK,	/* u32 */
262*4882a593Smuzhiyun 	__TCA_RED_MAX,
263*4882a593Smuzhiyun };
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun #define TCA_RED_MAX (__TCA_RED_MAX - 1)
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun struct tc_red_qopt {
268*4882a593Smuzhiyun 	__u32		limit;		/* HARD maximal queue length (bytes)	*/
269*4882a593Smuzhiyun 	__u32		qth_min;	/* Min average length threshold (bytes) */
270*4882a593Smuzhiyun 	__u32		qth_max;	/* Max average length threshold (bytes) */
271*4882a593Smuzhiyun 	unsigned char   Wlog;		/* log(W)		*/
272*4882a593Smuzhiyun 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
273*4882a593Smuzhiyun 	unsigned char   Scell_log;	/* cell size for idle damping */
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	/* This field can be used for flags that a RED-like qdisc has
276*4882a593Smuzhiyun 	 * historically supported. E.g. when configuring RED, it can be used for
277*4882a593Smuzhiyun 	 * ECN, HARDDROP and ADAPTATIVE. For SFQ it can be used for ECN,
278*4882a593Smuzhiyun 	 * HARDDROP. Etc. Because this field has not been validated, and is
279*4882a593Smuzhiyun 	 * copied back on dump, any bits besides those to which a given qdisc
280*4882a593Smuzhiyun 	 * has assigned a historical meaning need to be considered for free use
281*4882a593Smuzhiyun 	 * by userspace tools.
282*4882a593Smuzhiyun 	 *
283*4882a593Smuzhiyun 	 * Any further flags need to be passed differently, e.g. through an
284*4882a593Smuzhiyun 	 * attribute (such as TCA_RED_FLAGS above). Such attribute should allow
285*4882a593Smuzhiyun 	 * passing both recent and historic flags in one value.
286*4882a593Smuzhiyun 	 */
287*4882a593Smuzhiyun 	unsigned char	flags;
288*4882a593Smuzhiyun #define TC_RED_ECN		1
289*4882a593Smuzhiyun #define TC_RED_HARDDROP		2
290*4882a593Smuzhiyun #define TC_RED_ADAPTATIVE	4
/* Note: TC_RED_NODROP is not part of TC_RED_HISTORIC_FLAGS below. */
291*4882a593Smuzhiyun #define TC_RED_NODROP		8
292*4882a593Smuzhiyun };
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun #define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun struct tc_red_xstats {
297*4882a593Smuzhiyun 	__u32           early;          /* Early drops */
298*4882a593Smuzhiyun 	__u32           pdrop;          /* Drops due to queue limits */
299*4882a593Smuzhiyun 	__u32           other;          /* Drops due to drop() calls */
300*4882a593Smuzhiyun 	__u32           marked;         /* Marked packets */
301*4882a593Smuzhiyun };
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun /* GRED section */
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun #define MAX_DPs 16
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun enum {
308*4882a593Smuzhiyun        TCA_GRED_UNSPEC,
309*4882a593Smuzhiyun        TCA_GRED_PARMS,
310*4882a593Smuzhiyun        TCA_GRED_STAB,
311*4882a593Smuzhiyun        TCA_GRED_DPS,
312*4882a593Smuzhiyun        TCA_GRED_MAX_P,
313*4882a593Smuzhiyun        TCA_GRED_LIMIT,
314*4882a593Smuzhiyun        TCA_GRED_VQ_LIST,	/* nested TCA_GRED_VQ_ENTRY */
315*4882a593Smuzhiyun        __TCA_GRED_MAX,
316*4882a593Smuzhiyun };
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun enum {
321*4882a593Smuzhiyun 	TCA_GRED_VQ_ENTRY_UNSPEC,
322*4882a593Smuzhiyun 	TCA_GRED_VQ_ENTRY,	/* nested TCA_GRED_VQ_* */
323*4882a593Smuzhiyun 	__TCA_GRED_VQ_ENTRY_MAX,
324*4882a593Smuzhiyun };
325*4882a593Smuzhiyun #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun enum {
328*4882a593Smuzhiyun 	TCA_GRED_VQ_UNSPEC,
329*4882a593Smuzhiyun 	TCA_GRED_VQ_PAD,
330*4882a593Smuzhiyun 	TCA_GRED_VQ_DP,			/* u32 */
331*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_BYTES,		/* u64 */
332*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_PACKETS,	/* u32 */
333*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_BACKLOG,	/* u32 */
334*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_PROB_DROP,	/* u32 */
335*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_PROB_MARK,	/* u32 */
336*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_FORCED_DROP,	/* u32 */
337*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_FORCED_MARK,	/* u32 */
338*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_PDROP,		/* u32 */
339*4882a593Smuzhiyun 	TCA_GRED_VQ_STAT_OTHER,		/* u32 */
340*4882a593Smuzhiyun 	TCA_GRED_VQ_FLAGS,		/* u32 */
341*4882a593Smuzhiyun 	__TCA_GRED_VQ_MAX
342*4882a593Smuzhiyun };
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
345*4882a593Smuzhiyun 
/* Per-virtual-queue (DP) parameters and statistics. */
346*4882a593Smuzhiyun struct tc_gred_qopt {
347*4882a593Smuzhiyun 	__u32		limit;        /* HARD maximal queue length (bytes)    */
348*4882a593Smuzhiyun 	__u32		qth_min;      /* Min average length threshold (bytes) */
349*4882a593Smuzhiyun 	__u32		qth_max;      /* Max average length threshold (bytes) */
350*4882a593Smuzhiyun 	__u32		DP;           /* up to 2^32 DPs */
351*4882a593Smuzhiyun 	__u32		backlog;
352*4882a593Smuzhiyun 	__u32		qave;
353*4882a593Smuzhiyun 	__u32		forced;
354*4882a593Smuzhiyun 	__u32		early;
355*4882a593Smuzhiyun 	__u32		other;
356*4882a593Smuzhiyun 	__u32		pdrop;
357*4882a593Smuzhiyun 	__u8		Wlog;         /* log(W)               */
358*4882a593Smuzhiyun 	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
359*4882a593Smuzhiyun 	__u8		Scell_log;    /* cell size for idle damping */
360*4882a593Smuzhiyun 	__u8		prio;         /* prio of this VQ */
361*4882a593Smuzhiyun 	__u32		packets;
362*4882a593Smuzhiyun 	__u32		bytesin;
363*4882a593Smuzhiyun };
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun /* gred setup */
366*4882a593Smuzhiyun struct tc_gred_sopt {
367*4882a593Smuzhiyun 	__u32		DPs;
368*4882a593Smuzhiyun 	__u32		def_DP;
369*4882a593Smuzhiyun 	__u8		grio;
370*4882a593Smuzhiyun 	__u8		flags;
371*4882a593Smuzhiyun 	__u16		pad1;
372*4882a593Smuzhiyun };
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun /* CHOKe section */
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun enum {
377*4882a593Smuzhiyun 	TCA_CHOKE_UNSPEC,
378*4882a593Smuzhiyun 	TCA_CHOKE_PARMS,
379*4882a593Smuzhiyun 	TCA_CHOKE_STAB,
380*4882a593Smuzhiyun 	TCA_CHOKE_MAX_P,
381*4882a593Smuzhiyun 	__TCA_CHOKE_MAX,
382*4882a593Smuzhiyun };
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun struct tc_choke_qopt {
387*4882a593Smuzhiyun 	__u32		limit;		/* Hard queue length (packets)	*/
388*4882a593Smuzhiyun 	__u32		qth_min;	/* Min average threshold (packets) */
389*4882a593Smuzhiyun 	__u32		qth_max;	/* Max average threshold (packets) */
390*4882a593Smuzhiyun 	unsigned char   Wlog;		/* log(W)		*/
391*4882a593Smuzhiyun 	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
392*4882a593Smuzhiyun 	unsigned char   Scell_log;	/* cell size for idle damping */
393*4882a593Smuzhiyun 	unsigned char	flags;		/* see RED flags */
394*4882a593Smuzhiyun };
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun struct tc_choke_xstats {
397*4882a593Smuzhiyun 	__u32		early;          /* Early drops */
398*4882a593Smuzhiyun 	__u32		pdrop;          /* Drops due to queue limits */
399*4882a593Smuzhiyun 	__u32		other;          /* Drops due to drop() calls */
400*4882a593Smuzhiyun 	__u32		marked;         /* Marked packets */
401*4882a593Smuzhiyun 	__u32		matched;	/* Drops due to flow match */
402*4882a593Smuzhiyun };
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun /* HTB section */
405*4882a593Smuzhiyun #define TC_HTB_NUMPRIO		8
406*4882a593Smuzhiyun #define TC_HTB_MAXDEPTH		8
407*4882a593Smuzhiyun #define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */
408*4882a593Smuzhiyun 
/* Per-class HTB parameters. */
409*4882a593Smuzhiyun struct tc_htb_opt {
410*4882a593Smuzhiyun 	struct tc_ratespec 	rate;
411*4882a593Smuzhiyun 	struct tc_ratespec 	ceil;
412*4882a593Smuzhiyun 	__u32	buffer;
413*4882a593Smuzhiyun 	__u32	cbuffer;
414*4882a593Smuzhiyun 	__u32	quantum;
415*4882a593Smuzhiyun 	__u32	level;		/* out only */
416*4882a593Smuzhiyun 	__u32	prio;
417*4882a593Smuzhiyun };
/* Qdisc-wide HTB parameters. */
418*4882a593Smuzhiyun struct tc_htb_glob {
419*4882a593Smuzhiyun 	__u32 version;		/* to match HTB/TC */
420*4882a593Smuzhiyun     	__u32 rate2quantum;	/* bps->quantum divisor */
421*4882a593Smuzhiyun     	__u32 defcls;		/* default class number */
422*4882a593Smuzhiyun 	__u32 debug;		/* debug flags */
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	/* stats */
425*4882a593Smuzhiyun 	__u32 direct_pkts; /* count of non shaped packets */
426*4882a593Smuzhiyun };
427*4882a593Smuzhiyun enum {
428*4882a593Smuzhiyun 	TCA_HTB_UNSPEC,
429*4882a593Smuzhiyun 	TCA_HTB_PARMS,
430*4882a593Smuzhiyun 	TCA_HTB_INIT,
431*4882a593Smuzhiyun 	TCA_HTB_CTAB,
432*4882a593Smuzhiyun 	TCA_HTB_RTAB,
433*4882a593Smuzhiyun 	TCA_HTB_DIRECT_QLEN,
434*4882a593Smuzhiyun 	TCA_HTB_RATE64,
435*4882a593Smuzhiyun 	TCA_HTB_CEIL64,
436*4882a593Smuzhiyun 	TCA_HTB_PAD,
437*4882a593Smuzhiyun 	__TCA_HTB_MAX,
438*4882a593Smuzhiyun };
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun #define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun struct tc_htb_xstats {
443*4882a593Smuzhiyun 	__u32 lends;
444*4882a593Smuzhiyun 	__u32 borrows;
445*4882a593Smuzhiyun 	__u32 giants;	/* unused since 'Make HTB scheduler work with TSO.' */
446*4882a593Smuzhiyun 	__s32 tokens;
447*4882a593Smuzhiyun 	__s32 ctokens;
448*4882a593Smuzhiyun };
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun /* HFSC section */
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun struct tc_hfsc_qopt {
453*4882a593Smuzhiyun 	__u16	defcls;		/* default class */
454*4882a593Smuzhiyun };
455*4882a593Smuzhiyun 
/* Two-segment service curve: slope m1 for d microseconds, then slope m2. */
456*4882a593Smuzhiyun struct tc_service_curve {
457*4882a593Smuzhiyun 	__u32	m1;		/* slope of the first segment in bps */
458*4882a593Smuzhiyun 	__u32	d;		/* x-projection of the first segment in us */
459*4882a593Smuzhiyun 	__u32	m2;		/* slope of the second segment in bps */
460*4882a593Smuzhiyun };
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun struct tc_hfsc_stats {
463*4882a593Smuzhiyun 	__u64	work;		/* total work done */
464*4882a593Smuzhiyun 	__u64	rtwork;		/* work done by real-time criteria */
465*4882a593Smuzhiyun 	__u32	period;		/* current period */
466*4882a593Smuzhiyun 	__u32	level;		/* class level in hierarchy */
467*4882a593Smuzhiyun };
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun enum {
470*4882a593Smuzhiyun 	TCA_HFSC_UNSPEC,
471*4882a593Smuzhiyun 	TCA_HFSC_RSC,
472*4882a593Smuzhiyun 	TCA_HFSC_FSC,
473*4882a593Smuzhiyun 	TCA_HFSC_USC,
474*4882a593Smuzhiyun 	__TCA_HFSC_MAX,
475*4882a593Smuzhiyun };
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun /* CBQ section */
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun #define TC_CBQ_MAXPRIO		8
483*4882a593Smuzhiyun #define TC_CBQ_MAXLEVEL		8
484*4882a593Smuzhiyun #define TC_CBQ_DEF_EWMA		5
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun struct tc_cbq_lssopt {
487*4882a593Smuzhiyun 	unsigned char	change;
488*4882a593Smuzhiyun 	unsigned char	flags;
489*4882a593Smuzhiyun #define TCF_CBQ_LSS_BOUNDED	1
490*4882a593Smuzhiyun #define TCF_CBQ_LSS_ISOLATED	2
491*4882a593Smuzhiyun 	unsigned char  	ewma_log;
492*4882a593Smuzhiyun 	unsigned char  	level;
/* Bits for the 'change' field above. */
493*4882a593Smuzhiyun #define TCF_CBQ_LSS_FLAGS	1
494*4882a593Smuzhiyun #define TCF_CBQ_LSS_EWMA	2
495*4882a593Smuzhiyun #define TCF_CBQ_LSS_MAXIDLE	4
496*4882a593Smuzhiyun #define TCF_CBQ_LSS_MINIDLE	8
497*4882a593Smuzhiyun #define TCF_CBQ_LSS_OFFTIME	0x10
498*4882a593Smuzhiyun #define TCF_CBQ_LSS_AVPKT	0x20
499*4882a593Smuzhiyun 	__u32		maxidle;
500*4882a593Smuzhiyun 	__u32		minidle;
501*4882a593Smuzhiyun 	__u32		offtime;
502*4882a593Smuzhiyun 	__u32		avpkt;
503*4882a593Smuzhiyun };
504*4882a593Smuzhiyun 
/* Weighted round-robin parameters. */
505*4882a593Smuzhiyun struct tc_cbq_wrropt {
506*4882a593Smuzhiyun 	unsigned char	flags;
507*4882a593Smuzhiyun 	unsigned char	priority;
508*4882a593Smuzhiyun 	unsigned char	cpriority;
509*4882a593Smuzhiyun 	unsigned char	__reserved;
510*4882a593Smuzhiyun 	__u32		allot;
511*4882a593Smuzhiyun 	__u32		weight;
512*4882a593Smuzhiyun };
513*4882a593Smuzhiyun 
/* Overlimit-handling strategy. */
514*4882a593Smuzhiyun struct tc_cbq_ovl {
515*4882a593Smuzhiyun 	unsigned char	strategy;
516*4882a593Smuzhiyun #define	TC_CBQ_OVL_CLASSIC	0
517*4882a593Smuzhiyun #define	TC_CBQ_OVL_DELAY	1
518*4882a593Smuzhiyun #define	TC_CBQ_OVL_LOWPRIO	2
519*4882a593Smuzhiyun #define	TC_CBQ_OVL_DROP		3
520*4882a593Smuzhiyun #define	TC_CBQ_OVL_RCLASSIC	4
521*4882a593Smuzhiyun 	unsigned char	priority2;
522*4882a593Smuzhiyun 	__u16		pad;
523*4882a593Smuzhiyun 	__u32		penalty;
524*4882a593Smuzhiyun };
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun struct tc_cbq_police {
527*4882a593Smuzhiyun 	unsigned char	police;
528*4882a593Smuzhiyun 	unsigned char	__res1;
529*4882a593Smuzhiyun 	unsigned short	__res2;
530*4882a593Smuzhiyun };
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun struct tc_cbq_fopt {
533*4882a593Smuzhiyun 	__u32		split;
534*4882a593Smuzhiyun 	__u32		defmap;
535*4882a593Smuzhiyun 	__u32		defchange;
536*4882a593Smuzhiyun };
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun struct tc_cbq_xstats {
539*4882a593Smuzhiyun 	__u32		borrows;
540*4882a593Smuzhiyun 	__u32		overactions;
541*4882a593Smuzhiyun 	__s32		avgidle;
542*4882a593Smuzhiyun 	__s32		undertime;
543*4882a593Smuzhiyun };
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun enum {
546*4882a593Smuzhiyun 	TCA_CBQ_UNSPEC,
547*4882a593Smuzhiyun 	TCA_CBQ_LSSOPT,
548*4882a593Smuzhiyun 	TCA_CBQ_WRROPT,
549*4882a593Smuzhiyun 	TCA_CBQ_FOPT,
550*4882a593Smuzhiyun 	TCA_CBQ_OVL_STRATEGY,
551*4882a593Smuzhiyun 	TCA_CBQ_RATE,
552*4882a593Smuzhiyun 	TCA_CBQ_RTAB,
553*4882a593Smuzhiyun 	TCA_CBQ_POLICE,
554*4882a593Smuzhiyun 	__TCA_CBQ_MAX,
555*4882a593Smuzhiyun };
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun #define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun /* dsmark section */
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun enum {
562*4882a593Smuzhiyun 	TCA_DSMARK_UNSPEC,
563*4882a593Smuzhiyun 	TCA_DSMARK_INDICES,
564*4882a593Smuzhiyun 	TCA_DSMARK_DEFAULT_INDEX,
565*4882a593Smuzhiyun 	TCA_DSMARK_SET_TC_INDEX,
566*4882a593Smuzhiyun 	TCA_DSMARK_MASK,
567*4882a593Smuzhiyun 	TCA_DSMARK_VALUE,
568*4882a593Smuzhiyun 	__TCA_DSMARK_MAX,
569*4882a593Smuzhiyun };
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun /* ATM  section */
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun enum {
576*4882a593Smuzhiyun 	TCA_ATM_UNSPEC,
577*4882a593Smuzhiyun 	TCA_ATM_FD,		/* file/socket descriptor */
578*4882a593Smuzhiyun 	TCA_ATM_PTR,		/* pointer to descriptor - later */
579*4882a593Smuzhiyun 	TCA_ATM_HDR,		/* LL header */
580*4882a593Smuzhiyun 	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
581*4882a593Smuzhiyun 	TCA_ATM_ADDR,		/* PVC address (for output only) */
582*4882a593Smuzhiyun 	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
583*4882a593Smuzhiyun 	__TCA_ATM_MAX,
584*4882a593Smuzhiyun };
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun #define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun /* Network emulator */
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun enum {
591*4882a593Smuzhiyun 	TCA_NETEM_UNSPEC,
592*4882a593Smuzhiyun 	TCA_NETEM_CORR,
593*4882a593Smuzhiyun 	TCA_NETEM_DELAY_DIST,
594*4882a593Smuzhiyun 	TCA_NETEM_REORDER,
595*4882a593Smuzhiyun 	TCA_NETEM_CORRUPT,
596*4882a593Smuzhiyun 	TCA_NETEM_LOSS,
597*4882a593Smuzhiyun 	TCA_NETEM_RATE,
598*4882a593Smuzhiyun 	TCA_NETEM_ECN,
599*4882a593Smuzhiyun 	TCA_NETEM_RATE64,
600*4882a593Smuzhiyun 	TCA_NETEM_PAD,
601*4882a593Smuzhiyun 	TCA_NETEM_LATENCY64,
602*4882a593Smuzhiyun 	TCA_NETEM_JITTER64,
603*4882a593Smuzhiyun 	TCA_NETEM_SLOT,
604*4882a593Smuzhiyun 	TCA_NETEM_SLOT_DIST,
605*4882a593Smuzhiyun 	__TCA_NETEM_MAX,
606*4882a593Smuzhiyun };
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun struct tc_netem_qopt {
611*4882a593Smuzhiyun 	__u32	latency;	/* added delay (us) */
612*4882a593Smuzhiyun 	__u32   limit;		/* fifo limit (packets) */
613*4882a593Smuzhiyun 	__u32	loss;		/* random packet loss (0=none ~0=100%) */
614*4882a593Smuzhiyun 	__u32	gap;		/* re-ordering gap (0 for none) */
615*4882a593Smuzhiyun 	__u32   duplicate;	/* random packet dup  (0=none ~0=100%) */
616*4882a593Smuzhiyun 	__u32	jitter;		/* random jitter in latency (us) */
617*4882a593Smuzhiyun };
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun struct tc_netem_corr {
620*4882a593Smuzhiyun 	__u32	delay_corr;	/* delay correlation */
621*4882a593Smuzhiyun 	__u32	loss_corr;	/* packet loss correlation */
622*4882a593Smuzhiyun 	__u32	dup_corr;	/* duplicate correlation  */
623*4882a593Smuzhiyun };
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun struct tc_netem_reorder {
626*4882a593Smuzhiyun 	__u32	probability;
627*4882a593Smuzhiyun 	__u32	correlation;
628*4882a593Smuzhiyun };
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun struct tc_netem_corrupt {
631*4882a593Smuzhiyun 	__u32	probability;
632*4882a593Smuzhiyun 	__u32	correlation;
633*4882a593Smuzhiyun };
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun struct tc_netem_rate {
636*4882a593Smuzhiyun 	__u32	rate;	/* byte/s */
637*4882a593Smuzhiyun 	__s32	packet_overhead;
638*4882a593Smuzhiyun 	__u32	cell_size;
639*4882a593Smuzhiyun 	__s32	cell_overhead;
640*4882a593Smuzhiyun };
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun struct tc_netem_slot {
643*4882a593Smuzhiyun 	__s64   min_delay; /* nsec */
644*4882a593Smuzhiyun 	__s64   max_delay;
645*4882a593Smuzhiyun 	__s32   max_packets;
646*4882a593Smuzhiyun 	__s32   max_bytes;
647*4882a593Smuzhiyun 	__s64	dist_delay; /* nsec */
648*4882a593Smuzhiyun 	__s64	dist_jitter; /* nsec */
649*4882a593Smuzhiyun };
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun enum {
652*4882a593Smuzhiyun 	NETEM_LOSS_UNSPEC,
653*4882a593Smuzhiyun 	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
654*4882a593Smuzhiyun 	NETEM_LOSS_GE,		/* Gilbert Elliot models */
655*4882a593Smuzhiyun 	__NETEM_LOSS_MAX
656*4882a593Smuzhiyun };
657*4882a593Smuzhiyun #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun /* State transition probabilities for 4 state model */
660*4882a593Smuzhiyun struct tc_netem_gimodel {
661*4882a593Smuzhiyun 	__u32	p13;
662*4882a593Smuzhiyun 	__u32	p31;
663*4882a593Smuzhiyun 	__u32	p32;
664*4882a593Smuzhiyun 	__u32	p14;
665*4882a593Smuzhiyun 	__u32	p23;
666*4882a593Smuzhiyun };
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun /* Gilbert-Elliot models */
669*4882a593Smuzhiyun struct tc_netem_gemodel {
670*4882a593Smuzhiyun 	__u32 p;
671*4882a593Smuzhiyun 	__u32 r;
672*4882a593Smuzhiyun 	__u32 h;
673*4882a593Smuzhiyun 	__u32 k1;
674*4882a593Smuzhiyun };
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun #define NETEM_DIST_SCALE	8192
677*4882a593Smuzhiyun #define NETEM_DIST_MAX		16384
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun /* DRR */
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun enum {
682*4882a593Smuzhiyun 	TCA_DRR_UNSPEC,
683*4882a593Smuzhiyun 	TCA_DRR_QUANTUM,
684*4882a593Smuzhiyun 	__TCA_DRR_MAX
685*4882a593Smuzhiyun };
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun #define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun struct tc_drr_stats {
690*4882a593Smuzhiyun 	__u32	deficit;
691*4882a593Smuzhiyun };
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun /* MQPRIO */
694*4882a593Smuzhiyun #define TC_QOPT_BITMASK 15
695*4882a593Smuzhiyun #define TC_QOPT_MAX_QUEUE 16
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun enum {
698*4882a593Smuzhiyun 	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
699*4882a593Smuzhiyun 	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
700*4882a593Smuzhiyun 	__TC_MQPRIO_HW_OFFLOAD_MAX
701*4882a593Smuzhiyun };
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun enum {
706*4882a593Smuzhiyun 	TC_MQPRIO_MODE_DCB,
707*4882a593Smuzhiyun 	TC_MQPRIO_MODE_CHANNEL,
708*4882a593Smuzhiyun 	__TC_MQPRIO_MODE_MAX
709*4882a593Smuzhiyun };
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun enum {
714*4882a593Smuzhiyun 	TC_MQPRIO_SHAPER_DCB,
715*4882a593Smuzhiyun 	TC_MQPRIO_SHAPER_BW_RATE,	/* Add new shapers below */
716*4882a593Smuzhiyun 	__TC_MQPRIO_SHAPER_MAX
717*4882a593Smuzhiyun };
718*4882a593Smuzhiyun 
719*4882a593Smuzhiyun #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun struct tc_mqprio_qopt {
722*4882a593Smuzhiyun 	__u8	num_tc;
723*4882a593Smuzhiyun 	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
724*4882a593Smuzhiyun 	__u8	hw;
725*4882a593Smuzhiyun 	__u16	count[TC_QOPT_MAX_QUEUE];
726*4882a593Smuzhiyun 	__u16	offset[TC_QOPT_MAX_QUEUE];
727*4882a593Smuzhiyun };
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun #define TC_MQPRIO_F_MODE		0x1
730*4882a593Smuzhiyun #define TC_MQPRIO_F_SHAPER		0x2
731*4882a593Smuzhiyun #define TC_MQPRIO_F_MIN_RATE		0x4
732*4882a593Smuzhiyun #define TC_MQPRIO_F_MAX_RATE		0x8
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun enum {
735*4882a593Smuzhiyun 	TCA_MQPRIO_UNSPEC,
736*4882a593Smuzhiyun 	TCA_MQPRIO_MODE,
737*4882a593Smuzhiyun 	TCA_MQPRIO_SHAPER,
738*4882a593Smuzhiyun 	TCA_MQPRIO_MIN_RATE64,
739*4882a593Smuzhiyun 	TCA_MQPRIO_MAX_RATE64,
740*4882a593Smuzhiyun 	__TCA_MQPRIO_MAX,
741*4882a593Smuzhiyun };
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun /* SFB */
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun enum {
748*4882a593Smuzhiyun 	TCA_SFB_UNSPEC,
749*4882a593Smuzhiyun 	TCA_SFB_PARMS,
750*4882a593Smuzhiyun 	__TCA_SFB_MAX,
751*4882a593Smuzhiyun };
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun #define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun /*
756*4882a593Smuzhiyun  * Note: increment, decrement are Q0.16 fixed-point values.
757*4882a593Smuzhiyun  */
758*4882a593Smuzhiyun struct tc_sfb_qopt {
759*4882a593Smuzhiyun 	__u32 rehash_interval;	/* delay between hash move, in ms */
760*4882a593Smuzhiyun 	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
761*4882a593Smuzhiyun 	__u32 max;		/* max len of qlen_min */
762*4882a593Smuzhiyun 	__u32 bin_size;		/* maximum queue length per bin */
763*4882a593Smuzhiyun 	__u32 increment;	/* probability increment, (d1 in Blue) */
764*4882a593Smuzhiyun 	__u32 decrement;	/* probability decrement, (d2 in Blue) */
765*4882a593Smuzhiyun 	__u32 limit;		/* max SFB queue length */
766*4882a593Smuzhiyun 	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
767*4882a593Smuzhiyun 	__u32 penalty_burst;
768*4882a593Smuzhiyun };
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun struct tc_sfb_xstats {
771*4882a593Smuzhiyun 	__u32 earlydrop;
772*4882a593Smuzhiyun 	__u32 penaltydrop;
773*4882a593Smuzhiyun 	__u32 bucketdrop;
774*4882a593Smuzhiyun 	__u32 queuedrop;
775*4882a593Smuzhiyun 	__u32 childdrop; /* drops in child qdisc */
776*4882a593Smuzhiyun 	__u32 marked;
777*4882a593Smuzhiyun 	__u32 maxqlen;
778*4882a593Smuzhiyun 	__u32 maxprob;
779*4882a593Smuzhiyun 	__u32 avgprob;
780*4882a593Smuzhiyun };
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun #define SFB_MAX_PROB 0xFFFF
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun /* QFQ */
785*4882a593Smuzhiyun enum {
786*4882a593Smuzhiyun 	TCA_QFQ_UNSPEC,
787*4882a593Smuzhiyun 	TCA_QFQ_WEIGHT,
788*4882a593Smuzhiyun 	TCA_QFQ_LMAX,
789*4882a593Smuzhiyun 	__TCA_QFQ_MAX
790*4882a593Smuzhiyun };
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun #define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun struct tc_qfq_stats {
795*4882a593Smuzhiyun 	__u32 weight;
796*4882a593Smuzhiyun 	__u32 lmax;
797*4882a593Smuzhiyun };
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun /* CODEL */
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun enum {
802*4882a593Smuzhiyun 	TCA_CODEL_UNSPEC,
803*4882a593Smuzhiyun 	TCA_CODEL_TARGET,
804*4882a593Smuzhiyun 	TCA_CODEL_LIMIT,
805*4882a593Smuzhiyun 	TCA_CODEL_INTERVAL,
806*4882a593Smuzhiyun 	TCA_CODEL_ECN,
807*4882a593Smuzhiyun 	TCA_CODEL_CE_THRESHOLD,
808*4882a593Smuzhiyun 	__TCA_CODEL_MAX
809*4882a593Smuzhiyun };
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun #define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun struct tc_codel_xstats {
814*4882a593Smuzhiyun 	__u32	maxpacket; /* largest packet we've seen so far */
815*4882a593Smuzhiyun 	__u32	count;	   /* how many drops we've done since the last time we
816*4882a593Smuzhiyun 			    * entered dropping state
817*4882a593Smuzhiyun 			    */
818*4882a593Smuzhiyun 	__u32	lastcount; /* count at entry to dropping state */
819*4882a593Smuzhiyun 	__u32	ldelay;    /* in-queue delay seen by most recently dequeued packet */
820*4882a593Smuzhiyun 	__s32	drop_next; /* time to drop next packet */
821*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of time max qdisc packet limit was hit */
822*4882a593Smuzhiyun 	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
823*4882a593Smuzhiyun 	__u32	dropping;  /* are we in dropping state ? */
824*4882a593Smuzhiyun 	__u32	ce_mark;   /* number of CE marked packets because of ce_threshold */
825*4882a593Smuzhiyun };
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun /* FQ_CODEL */
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun #define FQ_CODEL_QUANTUM_MAX (1 << 20)
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun enum {
832*4882a593Smuzhiyun 	TCA_FQ_CODEL_UNSPEC,
833*4882a593Smuzhiyun 	TCA_FQ_CODEL_TARGET,
834*4882a593Smuzhiyun 	TCA_FQ_CODEL_LIMIT,
835*4882a593Smuzhiyun 	TCA_FQ_CODEL_INTERVAL,
836*4882a593Smuzhiyun 	TCA_FQ_CODEL_ECN,
837*4882a593Smuzhiyun 	TCA_FQ_CODEL_FLOWS,
838*4882a593Smuzhiyun 	TCA_FQ_CODEL_QUANTUM,
839*4882a593Smuzhiyun 	TCA_FQ_CODEL_CE_THRESHOLD,
840*4882a593Smuzhiyun 	TCA_FQ_CODEL_DROP_BATCH_SIZE,
841*4882a593Smuzhiyun 	TCA_FQ_CODEL_MEMORY_LIMIT,
842*4882a593Smuzhiyun 	__TCA_FQ_CODEL_MAX
843*4882a593Smuzhiyun };
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun #define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun enum {
848*4882a593Smuzhiyun 	TCA_FQ_CODEL_XSTATS_QDISC,
849*4882a593Smuzhiyun 	TCA_FQ_CODEL_XSTATS_CLASS,
850*4882a593Smuzhiyun };
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun struct tc_fq_codel_qd_stats {
853*4882a593Smuzhiyun 	__u32	maxpacket;	/* largest packet we've seen so far */
854*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of time max qdisc
855*4882a593Smuzhiyun 				 * packet limit was hit
856*4882a593Smuzhiyun 				 */
857*4882a593Smuzhiyun 	__u32	ecn_mark;	/* number of packets we ECN marked
858*4882a593Smuzhiyun 				 * instead of being dropped
859*4882a593Smuzhiyun 				 */
860*4882a593Smuzhiyun 	__u32	new_flow_count; /* number of time packets
861*4882a593Smuzhiyun 				 * created a 'new flow'
862*4882a593Smuzhiyun 				 */
863*4882a593Smuzhiyun 	__u32	new_flows_len;	/* count of flows in new list */
864*4882a593Smuzhiyun 	__u32	old_flows_len;	/* count of flows in old list */
865*4882a593Smuzhiyun 	__u32	ce_mark;	/* packets above ce_threshold */
866*4882a593Smuzhiyun 	__u32	memory_usage;	/* in bytes */
867*4882a593Smuzhiyun 	__u32	drop_overmemory;
868*4882a593Smuzhiyun };
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun struct tc_fq_codel_cl_stats {
871*4882a593Smuzhiyun 	__s32	deficit;
872*4882a593Smuzhiyun 	__u32	ldelay;		/* in-queue delay seen by most recently
873*4882a593Smuzhiyun 				 * dequeued packet
874*4882a593Smuzhiyun 				 */
875*4882a593Smuzhiyun 	__u32	count;
876*4882a593Smuzhiyun 	__u32	lastcount;
877*4882a593Smuzhiyun 	__u32	dropping;
878*4882a593Smuzhiyun 	__s32	drop_next;
879*4882a593Smuzhiyun };
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun struct tc_fq_codel_xstats {
882*4882a593Smuzhiyun 	__u32	type;
883*4882a593Smuzhiyun 	union {
884*4882a593Smuzhiyun 		struct tc_fq_codel_qd_stats qdisc_stats;
885*4882a593Smuzhiyun 		struct tc_fq_codel_cl_stats class_stats;
886*4882a593Smuzhiyun 	};
887*4882a593Smuzhiyun };
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun /* FQ */
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun enum {
892*4882a593Smuzhiyun 	TCA_FQ_UNSPEC,
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	TCA_FQ_QUANTUM,		/* RR quantum */
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	TCA_FQ_INITIAL_QUANTUM,		/* RR quantum for new flow */
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	TCA_FQ_FLOW_REFILL_DELAY,	/* flow credit refill delay in usec */
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	TCA_FQ_CE_THRESHOLD,	/* DCTCP-like CE-marking threshold */
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	TCA_FQ_TIMER_SLACK,	/* timer slack */
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	TCA_FQ_HORIZON,		/* time horizon in us */
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	TCA_FQ_HORIZON_DROP,	/* drop packets beyond horizon, or cap their EDT */
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	__TCA_FQ_MAX
925*4882a593Smuzhiyun };
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun #define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun struct tc_fq_qd_stats {
930*4882a593Smuzhiyun 	__u64	gc_flows;
931*4882a593Smuzhiyun 	__u64	highprio_packets;
932*4882a593Smuzhiyun 	__u64	tcp_retrans;
933*4882a593Smuzhiyun 	__u64	throttled;
934*4882a593Smuzhiyun 	__u64	flows_plimit;
935*4882a593Smuzhiyun 	__u64	pkts_too_long;
936*4882a593Smuzhiyun 	__u64	allocation_errors;
937*4882a593Smuzhiyun 	__s64	time_next_delayed_flow;
938*4882a593Smuzhiyun 	__u32	flows;
939*4882a593Smuzhiyun 	__u32	inactive_flows;
940*4882a593Smuzhiyun 	__u32	throttled_flows;
941*4882a593Smuzhiyun 	__u32	unthrottle_latency_ns;
942*4882a593Smuzhiyun 	__u64	ce_mark;		/* packets above ce_threshold */
943*4882a593Smuzhiyun 	__u64	horizon_drops;
944*4882a593Smuzhiyun 	__u64	horizon_caps;
945*4882a593Smuzhiyun };
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun /* Heavy-Hitter Filter */
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun enum {
950*4882a593Smuzhiyun 	TCA_HHF_UNSPEC,
951*4882a593Smuzhiyun 	TCA_HHF_BACKLOG_LIMIT,
952*4882a593Smuzhiyun 	TCA_HHF_QUANTUM,
953*4882a593Smuzhiyun 	TCA_HHF_HH_FLOWS_LIMIT,
954*4882a593Smuzhiyun 	TCA_HHF_RESET_TIMEOUT,
955*4882a593Smuzhiyun 	TCA_HHF_ADMIT_BYTES,
956*4882a593Smuzhiyun 	TCA_HHF_EVICT_TIMEOUT,
957*4882a593Smuzhiyun 	TCA_HHF_NON_HH_WEIGHT,
958*4882a593Smuzhiyun 	__TCA_HHF_MAX
959*4882a593Smuzhiyun };
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun #define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun struct tc_hhf_xstats {
964*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of times max qdisc packet limit
965*4882a593Smuzhiyun 				 * was hit
966*4882a593Smuzhiyun 				 */
967*4882a593Smuzhiyun 	__u32	hh_overlimit;   /* number of times max heavy-hitters was hit */
968*4882a593Smuzhiyun 	__u32	hh_tot_count;   /* number of captured heavy-hitters so far */
969*4882a593Smuzhiyun 	__u32	hh_cur_count;   /* number of current heavy-hitters */
970*4882a593Smuzhiyun };
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun /* PIE */
973*4882a593Smuzhiyun enum {
974*4882a593Smuzhiyun 	TCA_PIE_UNSPEC,
975*4882a593Smuzhiyun 	TCA_PIE_TARGET,
976*4882a593Smuzhiyun 	TCA_PIE_LIMIT,
977*4882a593Smuzhiyun 	TCA_PIE_TUPDATE,
978*4882a593Smuzhiyun 	TCA_PIE_ALPHA,
979*4882a593Smuzhiyun 	TCA_PIE_BETA,
980*4882a593Smuzhiyun 	TCA_PIE_ECN,
981*4882a593Smuzhiyun 	TCA_PIE_BYTEMODE,
982*4882a593Smuzhiyun 	TCA_PIE_DQ_RATE_ESTIMATOR,
983*4882a593Smuzhiyun 	__TCA_PIE_MAX
984*4882a593Smuzhiyun };
985*4882a593Smuzhiyun #define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun struct tc_pie_xstats {
988*4882a593Smuzhiyun 	__u64 prob;			/* current probability */
989*4882a593Smuzhiyun 	__u32 delay;			/* current delay in ms */
990*4882a593Smuzhiyun 	__u32 avg_dq_rate;		/* current average dq_rate in
991*4882a593Smuzhiyun 					 * bits/pie_time
992*4882a593Smuzhiyun 					 */
993*4882a593Smuzhiyun 	__u32 dq_rate_estimating;	/* is avg_dq_rate being calculated? */
994*4882a593Smuzhiyun 	__u32 packets_in;		/* total number of packets enqueued */
995*4882a593Smuzhiyun 	__u32 dropped;			/* packets dropped due to pie_action */
996*4882a593Smuzhiyun 	__u32 overlimit;		/* dropped due to lack of space
997*4882a593Smuzhiyun 					 * in queue
998*4882a593Smuzhiyun 					 */
999*4882a593Smuzhiyun 	__u32 maxq;			/* maximum queue size */
1000*4882a593Smuzhiyun 	__u32 ecn_mark;			/* packets marked with ecn*/
1001*4882a593Smuzhiyun };
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun /* FQ PIE */
1004*4882a593Smuzhiyun enum {
1005*4882a593Smuzhiyun 	TCA_FQ_PIE_UNSPEC,
1006*4882a593Smuzhiyun 	TCA_FQ_PIE_LIMIT,
1007*4882a593Smuzhiyun 	TCA_FQ_PIE_FLOWS,
1008*4882a593Smuzhiyun 	TCA_FQ_PIE_TARGET,
1009*4882a593Smuzhiyun 	TCA_FQ_PIE_TUPDATE,
1010*4882a593Smuzhiyun 	TCA_FQ_PIE_ALPHA,
1011*4882a593Smuzhiyun 	TCA_FQ_PIE_BETA,
1012*4882a593Smuzhiyun 	TCA_FQ_PIE_QUANTUM,
1013*4882a593Smuzhiyun 	TCA_FQ_PIE_MEMORY_LIMIT,
1014*4882a593Smuzhiyun 	TCA_FQ_PIE_ECN_PROB,
1015*4882a593Smuzhiyun 	TCA_FQ_PIE_ECN,
1016*4882a593Smuzhiyun 	TCA_FQ_PIE_BYTEMODE,
1017*4882a593Smuzhiyun 	TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
1018*4882a593Smuzhiyun 	__TCA_FQ_PIE_MAX
1019*4882a593Smuzhiyun };
1020*4882a593Smuzhiyun #define TCA_FQ_PIE_MAX   (__TCA_FQ_PIE_MAX - 1)
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun struct tc_fq_pie_xstats {
1023*4882a593Smuzhiyun 	__u32 packets_in;	/* total number of packets enqueued */
1024*4882a593Smuzhiyun 	__u32 dropped;		/* packets dropped due to fq_pie_action */
1025*4882a593Smuzhiyun 	__u32 overlimit;	/* dropped due to lack of space in queue */
1026*4882a593Smuzhiyun 	__u32 overmemory;	/* dropped due to lack of memory in queue */
1027*4882a593Smuzhiyun 	__u32 ecn_mark;		/* packets marked with ecn */
1028*4882a593Smuzhiyun 	__u32 new_flow_count;	/* count of new flows created by packets */
1029*4882a593Smuzhiyun 	__u32 new_flows_len;	/* count of flows in new list */
1030*4882a593Smuzhiyun 	__u32 old_flows_len;	/* count of flows in old list */
1031*4882a593Smuzhiyun 	__u32 memory_usage;	/* total memory across all queues */
1032*4882a593Smuzhiyun };
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun /* CBS */
1035*4882a593Smuzhiyun struct tc_cbs_qopt {
1036*4882a593Smuzhiyun 	__u8 offload;
1037*4882a593Smuzhiyun 	__u8 _pad[3];
1038*4882a593Smuzhiyun 	__s32 hicredit;
1039*4882a593Smuzhiyun 	__s32 locredit;
1040*4882a593Smuzhiyun 	__s32 idleslope;
1041*4882a593Smuzhiyun 	__s32 sendslope;
1042*4882a593Smuzhiyun };
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun enum {
1045*4882a593Smuzhiyun 	TCA_CBS_UNSPEC,
1046*4882a593Smuzhiyun 	TCA_CBS_PARMS,
1047*4882a593Smuzhiyun 	__TCA_CBS_MAX,
1048*4882a593Smuzhiyun };
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun /* ETF */
1054*4882a593Smuzhiyun struct tc_etf_qopt {
1055*4882a593Smuzhiyun 	__s32 delta;
1056*4882a593Smuzhiyun 	__s32 clockid;
1057*4882a593Smuzhiyun 	__u32 flags;
1058*4882a593Smuzhiyun #define TC_ETF_DEADLINE_MODE_ON	_BITUL(0)
1059*4882a593Smuzhiyun #define TC_ETF_OFFLOAD_ON	_BITUL(1)
1060*4882a593Smuzhiyun #define TC_ETF_SKIP_SOCK_CHECK	_BITUL(2)
1061*4882a593Smuzhiyun };
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun enum {
1064*4882a593Smuzhiyun 	TCA_ETF_UNSPEC,
1065*4882a593Smuzhiyun 	TCA_ETF_PARMS,
1066*4882a593Smuzhiyun 	__TCA_ETF_MAX,
1067*4882a593Smuzhiyun };
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun #define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun /* CAKE */
1073*4882a593Smuzhiyun enum {
1074*4882a593Smuzhiyun 	TCA_CAKE_UNSPEC,
1075*4882a593Smuzhiyun 	TCA_CAKE_PAD,
1076*4882a593Smuzhiyun 	TCA_CAKE_BASE_RATE64,
1077*4882a593Smuzhiyun 	TCA_CAKE_DIFFSERV_MODE,
1078*4882a593Smuzhiyun 	TCA_CAKE_ATM,
1079*4882a593Smuzhiyun 	TCA_CAKE_FLOW_MODE,
1080*4882a593Smuzhiyun 	TCA_CAKE_OVERHEAD,
1081*4882a593Smuzhiyun 	TCA_CAKE_RTT,
1082*4882a593Smuzhiyun 	TCA_CAKE_TARGET,
1083*4882a593Smuzhiyun 	TCA_CAKE_AUTORATE,
1084*4882a593Smuzhiyun 	TCA_CAKE_MEMORY,
1085*4882a593Smuzhiyun 	TCA_CAKE_NAT,
1086*4882a593Smuzhiyun 	TCA_CAKE_RAW,
1087*4882a593Smuzhiyun 	TCA_CAKE_WASH,
1088*4882a593Smuzhiyun 	TCA_CAKE_MPU,
1089*4882a593Smuzhiyun 	TCA_CAKE_INGRESS,
1090*4882a593Smuzhiyun 	TCA_CAKE_ACK_FILTER,
1091*4882a593Smuzhiyun 	TCA_CAKE_SPLIT_GSO,
1092*4882a593Smuzhiyun 	TCA_CAKE_FWMARK,
1093*4882a593Smuzhiyun 	__TCA_CAKE_MAX
1094*4882a593Smuzhiyun };
1095*4882a593Smuzhiyun #define TCA_CAKE_MAX	(__TCA_CAKE_MAX - 1)
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun enum {
1098*4882a593Smuzhiyun 	__TCA_CAKE_STATS_INVALID,
1099*4882a593Smuzhiyun 	TCA_CAKE_STATS_PAD,
1100*4882a593Smuzhiyun 	TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
1101*4882a593Smuzhiyun 	TCA_CAKE_STATS_MEMORY_LIMIT,
1102*4882a593Smuzhiyun 	TCA_CAKE_STATS_MEMORY_USED,
1103*4882a593Smuzhiyun 	TCA_CAKE_STATS_AVG_NETOFF,
1104*4882a593Smuzhiyun 	TCA_CAKE_STATS_MIN_NETLEN,
1105*4882a593Smuzhiyun 	TCA_CAKE_STATS_MAX_NETLEN,
1106*4882a593Smuzhiyun 	TCA_CAKE_STATS_MIN_ADJLEN,
1107*4882a593Smuzhiyun 	TCA_CAKE_STATS_MAX_ADJLEN,
1108*4882a593Smuzhiyun 	TCA_CAKE_STATS_TIN_STATS,
1109*4882a593Smuzhiyun 	TCA_CAKE_STATS_DEFICIT,
1110*4882a593Smuzhiyun 	TCA_CAKE_STATS_COBALT_COUNT,
1111*4882a593Smuzhiyun 	TCA_CAKE_STATS_DROPPING,
1112*4882a593Smuzhiyun 	TCA_CAKE_STATS_DROP_NEXT_US,
1113*4882a593Smuzhiyun 	TCA_CAKE_STATS_P_DROP,
1114*4882a593Smuzhiyun 	TCA_CAKE_STATS_BLUE_TIMER_US,
1115*4882a593Smuzhiyun 	__TCA_CAKE_STATS_MAX
1116*4882a593Smuzhiyun };
1117*4882a593Smuzhiyun #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun enum {
1120*4882a593Smuzhiyun 	__TCA_CAKE_TIN_STATS_INVALID,
1121*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_PAD,
1122*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SENT_PACKETS,
1123*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SENT_BYTES64,
1124*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
1125*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
1126*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
1127*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
1128*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
1129*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
1130*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
1131*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
1132*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
1133*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_TARGET_US,
1134*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_INTERVAL_US,
1135*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
1136*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_MISSES,
1137*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
1138*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
1139*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_AVG_DELAY_US,
1140*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BASE_DELAY_US,
1141*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
1142*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BULK_FLOWS,
1143*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
1144*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_MAX_SKBLEN,
1145*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
1146*4882a593Smuzhiyun 	__TCA_CAKE_TIN_STATS_MAX
1147*4882a593Smuzhiyun };
1148*4882a593Smuzhiyun #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
1149*4882a593Smuzhiyun #define TC_CAKE_MAX_TINS (8)
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun enum {
1152*4882a593Smuzhiyun 	CAKE_FLOW_NONE = 0,
1153*4882a593Smuzhiyun 	CAKE_FLOW_SRC_IP,
1154*4882a593Smuzhiyun 	CAKE_FLOW_DST_IP,
1155*4882a593Smuzhiyun 	CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
1156*4882a593Smuzhiyun 	CAKE_FLOW_FLOWS,
1157*4882a593Smuzhiyun 	CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
1158*4882a593Smuzhiyun 	CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
1159*4882a593Smuzhiyun 	CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
1160*4882a593Smuzhiyun 	CAKE_FLOW_MAX,
1161*4882a593Smuzhiyun };
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun enum {
1164*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV3 = 0,
1165*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV4,
1166*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV8,
1167*4882a593Smuzhiyun 	CAKE_DIFFSERV_BESTEFFORT,
1168*4882a593Smuzhiyun 	CAKE_DIFFSERV_PRECEDENCE,
1169*4882a593Smuzhiyun 	CAKE_DIFFSERV_MAX
1170*4882a593Smuzhiyun };
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun enum {
1173*4882a593Smuzhiyun 	CAKE_ACK_NONE = 0,
1174*4882a593Smuzhiyun 	CAKE_ACK_FILTER,
1175*4882a593Smuzhiyun 	CAKE_ACK_AGGRESSIVE,
1176*4882a593Smuzhiyun 	CAKE_ACK_MAX
1177*4882a593Smuzhiyun };
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun enum {
1180*4882a593Smuzhiyun 	CAKE_ATM_NONE = 0,
1181*4882a593Smuzhiyun 	CAKE_ATM_ATM,
1182*4882a593Smuzhiyun 	CAKE_ATM_PTM,
1183*4882a593Smuzhiyun 	CAKE_ATM_MAX
1184*4882a593Smuzhiyun };
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun /* TAPRIO */
1188*4882a593Smuzhiyun enum {
1189*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_GATES = 0x00,
1190*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
1191*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
1192*4882a593Smuzhiyun };
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun enum {
1195*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
1196*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
1197*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
1198*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
1199*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
1200*4882a593Smuzhiyun 	__TCA_TAPRIO_SCHED_ENTRY_MAX,
1201*4882a593Smuzhiyun };
1202*4882a593Smuzhiyun #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun /* The format for schedule entry list is:
1205*4882a593Smuzhiyun  * [TCA_TAPRIO_SCHED_ENTRY_LIST]
1206*4882a593Smuzhiyun  *   [TCA_TAPRIO_SCHED_ENTRY]
1207*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
1208*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_GATES]
1209*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
1210*4882a593Smuzhiyun  */
1211*4882a593Smuzhiyun enum {
1212*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_UNSPEC,
1213*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY,
1214*4882a593Smuzhiyun 	__TCA_TAPRIO_SCHED_MAX,
1215*4882a593Smuzhiyun };
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun /* The format for the admin sched (dump only):
1220*4882a593Smuzhiyun  * [TCA_TAPRIO_SCHED_ADMIN_SCHED]
1221*4882a593Smuzhiyun  *   [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]
1222*4882a593Smuzhiyun  *   [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]
1223*4882a593Smuzhiyun  *     [TCA_TAPRIO_ATTR_SCHED_ENTRY]
1224*4882a593Smuzhiyun  *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_CMD]
1225*4882a593Smuzhiyun  *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_GATES]
1226*4882a593Smuzhiyun  *       [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
1227*4882a593Smuzhiyun  */
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun #define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	_BITUL(0)
1230*4882a593Smuzhiyun #define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	_BITUL(1)
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun enum {
1233*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_UNSPEC,
1234*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
1235*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
1236*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
1237*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
1238*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
1239*4882a593Smuzhiyun 	TCA_TAPRIO_PAD,
1240*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
1241*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
1242*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
1243*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_FLAGS, /* u32 */
1244*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
1245*4882a593Smuzhiyun 	__TCA_TAPRIO_ATTR_MAX,
1246*4882a593Smuzhiyun };
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun /* ETS */
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun #define TCQ_ETS_MAX_BANDS 16
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun enum {
1255*4882a593Smuzhiyun 	TCA_ETS_UNSPEC,
1256*4882a593Smuzhiyun 	TCA_ETS_NBANDS,		/* u8 */
1257*4882a593Smuzhiyun 	TCA_ETS_NSTRICT,	/* u8 */
1258*4882a593Smuzhiyun 	TCA_ETS_QUANTA,		/* nested TCA_ETS_QUANTA_BAND */
1259*4882a593Smuzhiyun 	TCA_ETS_QUANTA_BAND,	/* u32 */
1260*4882a593Smuzhiyun 	TCA_ETS_PRIOMAP,	/* nested TCA_ETS_PRIOMAP_BAND */
1261*4882a593Smuzhiyun 	TCA_ETS_PRIOMAP_BAND,	/* u8 */
1262*4882a593Smuzhiyun 	__TCA_ETS_MAX,
1263*4882a593Smuzhiyun };
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun #define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun #endif
1268