xref: /OK3568_Linux_fs/kernel/tools/include/uapi/linux/pkt_sched.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2*4882a593Smuzhiyun #ifndef __LINUX_PKT_SCHED_H
3*4882a593Smuzhiyun #define __LINUX_PKT_SCHED_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/types.h>
6*4882a593Smuzhiyun 
/* Logical priority bands not depending on specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not occasional :-). New IPv6 drafts
   preferred full anarchy inspired by diffserv group.

   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class, actually, as rule it will be handled with more care than
   filler or even bulk.
 */

#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7

/* Highest logical priority value; priomap arrays below are sized
 * TC_PRIO_MAX + 1 (see struct tc_prio_qopt). */
#define TC_PRIO_MAX			15
28*4882a593Smuzhiyun 
/* Generic queue statistics, available for all the elements.
   Particular schedulers may have also their private records.
 */

struct tc_stats {
	__u64	bytes;			/* Number of enqueued bytes */
	__u32	packets;		/* Number of enqueued packets	*/
	__u32	drops;			/* Packets dropped because of lack of resources */
	__u32	overlimits;		/* Number of throttle events when this
					 * flow goes out of allocated bandwidth */
	__u32	bps;			/* Current flow byte rate */
	__u32	pps;			/* Current flow packet rate */
	__u32	qlen;			/* Current queue length */
	__u32	backlog;		/* Current queue backlog; units not shown
					 * here -- presumably bytes, per other
					 * backlog fields in this file */
};

/* Parameters for the kernel rate estimator attached to a qdisc/class. */
struct tc_estimator {
	signed char	interval;	/* Sampling period selector (signed; may be negative) */
	unsigned char	ewma_log;	/* log2 of the EWMA averaging constant */
};
49*4882a593Smuzhiyun 
/* "Handles"
   ---------

    All the traffic control objects have 32bit identifiers, or "handles".

    They can be considered as opaque numbers from user API viewpoint,
    but actually they always consist of two fields: major and
    minor numbers, which are interpreted by kernel specially,
    that may be used by applications, though not recommended.

    F.e. qdisc handles always have minor number equal to zero,
    classes (or flows) have major equal to parent qdisc major, and
    minor uniquely identifying class inside qdisc.

    Macros to manipulate handles:
 */

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
/* Combine the major part of 'maj' and the minor part of 'min' into one handle. */
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

#define TC_H_UNSPEC	(0U)
#define TC_H_ROOT	(0xFFFFFFFFU)
#define TC_H_INGRESS    (0xFFFFFFF1U)
#define TC_H_CLSACT	TC_H_INGRESS

/* Reserved minor numbers used with the clsact/ingress pseudo-qdiscs. */
#define TC_H_MIN_PRIORITY	0xFFE0U
#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U
81*4882a593Smuzhiyun 
/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
enum tc_link_layer {
	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
	TC_LINKLAYER_ETHERNET,
	TC_LINKLAYER_ATM,
};
#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */

/* Rate specification shared by rate-limited qdiscs (TBF, HTB, CBQ, ...). */
struct tc_ratespec {
	unsigned char	cell_log;
	__u8		linklayer; /* lower 4 bits */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
	__u32		rate;
};

/* Size of the userspace-supplied rate-to-time lookup table (TCA_*_RTAB/CTAB). */
#define TC_RTAB_SIZE	1024
100*4882a593Smuzhiyun 
/* Packet size translation table parameters (size table, "stab"). */
struct tc_sizespec {
	unsigned char	cell_log;
	unsigned char	size_log;
	short		cell_align;
	int		overhead;
	unsigned int	linklayer;
	unsigned int	mpu;
	unsigned int	mtu;
	unsigned int	tsize;
};

/* Netlink attributes for the size table. */
enum {
	TCA_STAB_UNSPEC,
	TCA_STAB_BASE,
	TCA_STAB_DATA,
	__TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
120*4882a593Smuzhiyun 
/* FIFO section */

struct tc_fifo_qopt {
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};

/* SKBPRIO section */

/*
 * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
 * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
 * to map one to one the DS field of IPV4 and IPV6 headers.
 * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
 */

#define SKBPRIO_MAX_PRIORITY 64

struct tc_skbprio_qopt {
	__u32	limit;		/* Queue length in packets. */
};
141*4882a593Smuzhiyun 
/* PRIO section */

#define TCQ_PRIO_BANDS	16	/* Maximum number of PRIO bands */
#define TCQ_MIN_PRIO_BANDS 2	/* Minimum number of PRIO bands */

struct tc_prio_qopt {
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};

/* MULTIQ section */

struct tc_multiq_qopt {
	__u16	bands;			/* Number of bands */
	__u16	max_bands;		/* Maximum number of queues */
};
158*4882a593Smuzhiyun 
/* PLUG section */

#define TCQ_PLUG_BUFFER                0
#define TCQ_PLUG_RELEASE_ONE           1
#define TCQ_PLUG_RELEASE_INDEFINITE    2
#define TCQ_PLUG_LIMIT                 3

struct tc_plug_qopt {
	/* TCQ_PLUG_BUFFER: Inset a plug into the queue and
	 *  buffer any incoming packets
	 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
	 *   to beginning of the next plug.
	 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
	 *   Stop buffering packets until the next TCQ_PLUG_BUFFER
	 *   command is received (just act as a pass-thru queue).
	 * TCQ_PLUG_LIMIT: Increase/decrease queue size
	 */
	int             action;		/* One of the TCQ_PLUG_* values above */
	__u32           limit;		/* Queue size, used with TCQ_PLUG_LIMIT */
};
179*4882a593Smuzhiyun 
/* TBF section */

/* Token Bucket Filter configuration. */
struct tc_tbf_qopt {
	struct tc_ratespec rate;	/* Sustained rate */
	struct tc_ratespec peakrate;	/* Peak rate */
	__u32		limit;
	__u32		buffer;
	__u32		mtu;
};

/* Netlink attributes for the TBF qdisc. */
enum {
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,
	TCA_TBF_RTAB,
	TCA_TBF_PTAB,
	TCA_TBF_RATE64,
	TCA_TBF_PRATE64,
	TCA_TBF_BURST,
	TCA_TBF_PBURST,
	TCA_TBF_PAD,
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 
/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt {
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor  */
	unsigned	flows;		/* Maximal number of flows  */
};

struct tc_sfqred_stats {
	__u32           prob_drop;      /* Early drops, below max threshold */
	__u32           forced_drop;	/* Early drops, after max threshold */
	__u32           prob_mark;      /* Marked packets, below max threshold */
	__u32           forced_mark;    /* Marked packets, after max threshold */
	__u32           prob_mark_head; /* Marked packets, below max threshold */
	__u32           forced_mark_head;/* Marked packets, after max threshold */
};

/* Extended SFQ configuration; embeds the v0 layout for compatibility. */
struct tc_sfq_qopt_v1 {
	struct tc_sfq_qopt v0;
	unsigned int	depth;		/* max number of packets per flow */
	unsigned int	headdrop;
/* SFQRED parameters */
	__u32		limit;		/* HARD maximal flow queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char   Wlog;		/* log(W)		*/
	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char   Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
	__u32		max_P;		/* probability, high resolution */
/* SFQRED stats */
	struct tc_sfqred_stats stats;
};


struct tc_sfq_xstats {
	__s32		allot;
};
250*4882a593Smuzhiyun 
/* RED section */

/* Netlink attributes for the RED qdisc. */
enum {
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,
	TCA_RED_STAB,
	TCA_RED_MAX_P,
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt {
	__u32		limit;		/* HARD maximal queue length (bytes)	*/
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char   Wlog;		/* log(W)		*/
	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char   Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
#define TC_RED_ECN		1
#define TC_RED_HARDDROP		2
#define TC_RED_ADAPTATIVE	4
};

struct tc_red_xstats {
	__u32           early;          /* Early drops */
	__u32           pdrop;          /* Drops due to queue limits */
	__u32           other;          /* Drops due to drop() calls */
	__u32           marked;         /* Marked packets */
};
282*4882a593Smuzhiyun 
/* GRED section */

#define MAX_DPs 16	/* Maximum number of GRED virtual queues (DPs) */

/* Netlink attributes for the GRED qdisc. */
enum {
       TCA_GRED_UNSPEC,
       TCA_GRED_PARMS,
       TCA_GRED_STAB,
       TCA_GRED_DPS,
       TCA_GRED_MAX_P,
       TCA_GRED_LIMIT,
       TCA_GRED_VQ_LIST,	/* nested TCA_GRED_VQ_ENTRY */
       __TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

enum {
	TCA_GRED_VQ_ENTRY_UNSPEC,
	TCA_GRED_VQ_ENTRY,	/* nested TCA_GRED_VQ_* */
	__TCA_GRED_VQ_ENTRY_MAX,
};
#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)

/* Per-virtual-queue attributes nested inside TCA_GRED_VQ_ENTRY. */
enum {
	TCA_GRED_VQ_UNSPEC,
	TCA_GRED_VQ_PAD,
	TCA_GRED_VQ_DP,			/* u32 */
	TCA_GRED_VQ_STAT_BYTES,		/* u64 */
	TCA_GRED_VQ_STAT_PACKETS,	/* u32 */
	TCA_GRED_VQ_STAT_BACKLOG,	/* u32 */
	TCA_GRED_VQ_STAT_PROB_DROP,	/* u32 */
	TCA_GRED_VQ_STAT_PROB_MARK,	/* u32 */
	TCA_GRED_VQ_STAT_FORCED_DROP,	/* u32 */
	TCA_GRED_VQ_STAT_FORCED_MARK,	/* u32 */
	TCA_GRED_VQ_STAT_PDROP,		/* u32 */
	TCA_GRED_VQ_STAT_OTHER,		/* u32 */
	TCA_GRED_VQ_FLAGS,		/* u32 */
	__TCA_GRED_VQ_MAX
};

#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
325*4882a593Smuzhiyun 
/* Per-virtual-queue GRED parameters and counters. */
struct tc_gred_qopt {
	__u32		limit;        /* HARD maximal queue length (bytes)    */
	__u32		qth_min;      /* Min average length threshold (bytes) */
	__u32		qth_max;      /* Max average length threshold (bytes) */
	__u32		DP;           /* up to 2^32 DPs */
	__u32		backlog;
	__u32		qave;
	__u32		forced;
	__u32		early;
	__u32		other;
	__u32		pdrop;
	__u8		Wlog;         /* log(W)               */
	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
	__u8		Scell_log;    /* cell size for idle damping */
	__u8		prio;         /* prio of this VQ */
	__u32		packets;
	__u32		bytesin;
};

/* gred setup */
struct tc_gred_sopt {
	__u32		DPs;		/* Number of virtual queues */
	__u32		def_DP;		/* Default virtual queue */
	__u8		grio;
	__u8		flags;
	__u16		pad1;
};
353*4882a593Smuzhiyun 
/* CHOKe section */

/* Netlink attributes for the CHOKe qdisc. */
enum {
	TCA_CHOKE_UNSPEC,
	TCA_CHOKE_PARMS,
	TCA_CHOKE_STAB,
	TCA_CHOKE_MAX_P,
	__TCA_CHOKE_MAX,
};

#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)

struct tc_choke_qopt {
	__u32		limit;		/* Hard queue length (packets)	*/
	__u32		qth_min;	/* Min average threshold (packets) */
	__u32		qth_max;	/* Max average threshold (packets) */
	unsigned char   Wlog;		/* log(W)		*/
	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char   Scell_log;	/* cell size for idle damping */
	unsigned char	flags;		/* see RED flags */
};

struct tc_choke_xstats {
	__u32		early;          /* Early drops */
	__u32		pdrop;          /* Drops due to queue limits */
	__u32		other;          /* Drops due to drop() calls */
	__u32		marked;         /* Marked packets */
	__u32		matched;	/* Drops due to flow match */
};
383*4882a593Smuzhiyun 
/* HTB section */
#define TC_HTB_NUMPRIO		8
#define TC_HTB_MAXDEPTH		8
#define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */

/* Per-class HTB parameters. */
struct tc_htb_opt {
	struct tc_ratespec 	rate;	/* Guaranteed rate */
	struct tc_ratespec 	ceil;	/* Ceiling (borrowing limit) */
	__u32	buffer;
	__u32	cbuffer;
	__u32	quantum;
	__u32	level;		/* out only */
	__u32	prio;
};

/* Global (per-qdisc) HTB parameters. */
struct tc_htb_glob {
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts; /* count of non shaped packets */
};

/* Netlink attributes for the HTB qdisc. */
enum {
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,
	TCA_HTB_INIT,
	TCA_HTB_CTAB,
	TCA_HTB_RTAB,
	TCA_HTB_DIRECT_QLEN,
	TCA_HTB_RATE64,
	TCA_HTB_CEIL64,
	TCA_HTB_PAD,
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats {
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* unused since 'Make HTB scheduler work with TSO.' */
	__s32 tokens;
	__s32 ctokens;
};
429*4882a593Smuzhiyun 
/* HFSC section */

struct tc_hfsc_qopt {
	__u16	defcls;		/* default class */
};

/* Two-piece linear service curve. */
struct tc_service_curve {
	__u32	m1;		/* slope of the first segment in bps */
	__u32	d;		/* x-projection of the first segment in us */
	__u32	m2;		/* slope of the second segment in bps */
};

struct tc_hfsc_stats {
	__u64	work;		/* total work done */
	__u64	rtwork;		/* work done by real-time criteria */
	__u32	period;		/* current period */
	__u32	level;		/* class level in hierarchy */
};

/* Netlink attributes for the HFSC qdisc: real-time, link-sharing and
 * upper-limit service curves. */
enum {
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,
	TCA_HFSC_FSC,
	TCA_HFSC_USC,
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
458*4882a593Smuzhiyun 

/* CBQ section */

#define TC_CBQ_MAXPRIO		8
#define TC_CBQ_MAXLEVEL		8
#define TC_CBQ_DEF_EWMA		5

/* Link-sharing scheduler parameters. */
struct tc_cbq_lssopt {
	unsigned char	change;		/* Bitmap of TCF_CBQ_LSS_* fields to change */
	unsigned char	flags;
#define TCF_CBQ_LSS_BOUNDED	1
#define TCF_CBQ_LSS_ISOLATED	2
	unsigned char  	ewma_log;
	unsigned char  	level;
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;
	__u32		minidle;
	__u32		offtime;
	__u32		avpkt;
};

/* Weighted round-robin parameters. */
struct tc_cbq_wrropt {
	unsigned char	flags;
	unsigned char	priority;
	unsigned char	cpriority;
	unsigned char	__reserved;
	__u32		allot;
	__u32		weight;
};

/* Overlimit handling strategy. */
struct tc_cbq_ovl {
	unsigned char	strategy;
#define	TC_CBQ_OVL_CLASSIC	0
#define	TC_CBQ_OVL_DELAY	1
#define	TC_CBQ_OVL_LOWPRIO	2
#define	TC_CBQ_OVL_DROP		3
#define	TC_CBQ_OVL_RCLASSIC	4
	unsigned char	priority2;
	__u16		pad;
	__u32		penalty;
};
505*4882a593Smuzhiyun 
struct tc_cbq_police {
	unsigned char	police;
	unsigned char	__res1;		/* reserved */
	unsigned short	__res2;		/* reserved */
};

/* Classification (filter) parameters. */
struct tc_cbq_fopt {
	__u32		split;
	__u32		defmap;
	__u32		defchange;
};

struct tc_cbq_xstats {
	__u32		borrows;
	__u32		overactions;
	__s32		avgidle;
	__s32		undertime;
};

/* Netlink attributes for the CBQ qdisc. */
enum {
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,
	TCA_CBQ_WRROPT,
	TCA_CBQ_FOPT,
	TCA_CBQ_OVL_STRATEGY,
	TCA_CBQ_RATE,
	TCA_CBQ_RTAB,
	TCA_CBQ_POLICE,
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
538*4882a593Smuzhiyun 
/* dsmark section */

/* Netlink attributes for the dsmark qdisc. */
enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,
	TCA_DSMARK_DEFAULT_INDEX,
	TCA_DSMARK_SET_TC_INDEX,
	TCA_DSMARK_MASK,
	TCA_DSMARK_VALUE,
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
552*4882a593Smuzhiyun 
/* ATM  section */

/* Netlink attributes for the ATM qdisc. */
enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
567*4882a593Smuzhiyun 
/* Network emulator */

/* Netlink attributes for the netem qdisc. */
enum {
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,
	TCA_NETEM_DELAY_DIST,
	TCA_NETEM_REORDER,
	TCA_NETEM_CORRUPT,
	TCA_NETEM_LOSS,
	TCA_NETEM_RATE,
	TCA_NETEM_ECN,
	TCA_NETEM_RATE64,
	TCA_NETEM_PAD,
	TCA_NETEM_LATENCY64,
	TCA_NETEM_JITTER64,
	TCA_NETEM_SLOT,
	TCA_NETEM_SLOT_DIST,
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)

struct tc_netem_qopt {
	__u32	latency;	/* added delay (us) */
	__u32   limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32   duplicate;	/* random packet dup  (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};
598*4882a593Smuzhiyun 
struct tc_netem_corr {
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation  */
};

struct tc_netem_reorder {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_corrupt {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_rate {
	__u32	rate;	/* byte/s */
	__s32	packet_overhead;
	__u32	cell_size;
	__s32	cell_overhead;
};

/* Slotted delivery: packets are released in bursts ("slots"). */
struct tc_netem_slot {
	__s64   min_delay; /* nsec */
	__s64   max_delay;
	__s32   max_packets;
	__s32   max_bytes;
	__s64	dist_delay; /* nsec */
	__s64	dist_jitter; /* nsec */
};
630*4882a593Smuzhiyun 
/* Loss-model attributes nested inside TCA_NETEM_LOSS. */
enum {
	NETEM_LOSS_UNSPEC,
	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
	NETEM_LOSS_GE,		/* Gilbert Elliot models */
	__NETEM_LOSS_MAX
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)

/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
	__u32	p13;
	__u32	p31;
	__u32	p32;
	__u32	p14;
	__u32	p23;
};

/* Gilbert-Elliot models */
struct tc_netem_gemodel {
	__u32 p;
	__u32 r;
	__u32 h;
	__u32 k1;
};

#define NETEM_DIST_SCALE	8192
#define NETEM_DIST_MAX		16384
658*4882a593Smuzhiyun 
/* DRR */

/* Netlink attributes for the DRR (deficit round robin) qdisc. */
enum {
	TCA_DRR_UNSPEC,
	TCA_DRR_QUANTUM,
	__TCA_DRR_MAX
};

#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)

struct tc_drr_stats {
	__u32	deficit;
};
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun /* MQPRIO */
674*4882a593Smuzhiyun #define TC_QOPT_BITMASK 15
675*4882a593Smuzhiyun #define TC_QOPT_MAX_QUEUE 16
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun enum {
678*4882a593Smuzhiyun 	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
679*4882a593Smuzhiyun 	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
680*4882a593Smuzhiyun 	__TC_MQPRIO_HW_OFFLOAD_MAX
681*4882a593Smuzhiyun };
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun enum {
686*4882a593Smuzhiyun 	TC_MQPRIO_MODE_DCB,
687*4882a593Smuzhiyun 	TC_MQPRIO_MODE_CHANNEL,
688*4882a593Smuzhiyun 	__TC_MQPRIO_MODE_MAX
689*4882a593Smuzhiyun };
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun #define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun enum {
694*4882a593Smuzhiyun 	TC_MQPRIO_SHAPER_DCB,
695*4882a593Smuzhiyun 	TC_MQPRIO_SHAPER_BW_RATE,	/* Add new shapers below */
696*4882a593Smuzhiyun 	__TC_MQPRIO_SHAPER_MAX
697*4882a593Smuzhiyun };
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun #define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
700*4882a593Smuzhiyun 
struct tc_mqprio_qopt {
	__u8	num_tc;				/* Number of traffic classes */
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];	/* Map: priority -> traffic class */
	__u8	hw;				/* Hardware offload mode (TC_MQPRIO_HW_OFFLOAD_*) */
	__u16	count[TC_QOPT_MAX_QUEUE];	/* Queues per traffic class */
	__u16	offset[TC_QOPT_MAX_QUEUE];	/* First queue of each traffic class */
};

/* Validity flags for the TCA_MQPRIO_* attributes below. */
#define TC_MQPRIO_F_MODE		0x1
#define TC_MQPRIO_F_SHAPER		0x2
#define TC_MQPRIO_F_MIN_RATE		0x4
#define TC_MQPRIO_F_MAX_RATE		0x8

enum {
	TCA_MQPRIO_UNSPEC,
	TCA_MQPRIO_MODE,
	TCA_MQPRIO_SHAPER,
	TCA_MQPRIO_MIN_RATE64,
	TCA_MQPRIO_MAX_RATE64,
	__TCA_MQPRIO_MAX,
};

#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun /* SFB */
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun enum {
728*4882a593Smuzhiyun 	TCA_SFB_UNSPEC,
729*4882a593Smuzhiyun 	TCA_SFB_PARMS,
730*4882a593Smuzhiyun 	__TCA_SFB_MAX,
731*4882a593Smuzhiyun };
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun #define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun /*
736*4882a593Smuzhiyun  * Note: increment, decrement are Q0.16 fixed-point values.
737*4882a593Smuzhiyun  */
738*4882a593Smuzhiyun struct tc_sfb_qopt {
739*4882a593Smuzhiyun 	__u32 rehash_interval;	/* delay between hash move, in ms */
740*4882a593Smuzhiyun 	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
741*4882a593Smuzhiyun 	__u32 max;		/* max len of qlen_min */
742*4882a593Smuzhiyun 	__u32 bin_size;		/* maximum queue length per bin */
743*4882a593Smuzhiyun 	__u32 increment;	/* probability increment, (d1 in Blue) */
744*4882a593Smuzhiyun 	__u32 decrement;	/* probability decrement, (d2 in Blue) */
745*4882a593Smuzhiyun 	__u32 limit;		/* max SFB queue length */
746*4882a593Smuzhiyun 	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
747*4882a593Smuzhiyun 	__u32 penalty_burst;
748*4882a593Smuzhiyun };
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun struct tc_sfb_xstats {
751*4882a593Smuzhiyun 	__u32 earlydrop;
752*4882a593Smuzhiyun 	__u32 penaltydrop;
753*4882a593Smuzhiyun 	__u32 bucketdrop;
754*4882a593Smuzhiyun 	__u32 queuedrop;
755*4882a593Smuzhiyun 	__u32 childdrop; /* drops in child qdisc */
756*4882a593Smuzhiyun 	__u32 marked;
757*4882a593Smuzhiyun 	__u32 maxqlen;
758*4882a593Smuzhiyun 	__u32 maxprob;
759*4882a593Smuzhiyun 	__u32 avgprob;
760*4882a593Smuzhiyun };
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun #define SFB_MAX_PROB 0xFFFF
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun /* QFQ */
765*4882a593Smuzhiyun enum {
766*4882a593Smuzhiyun 	TCA_QFQ_UNSPEC,
767*4882a593Smuzhiyun 	TCA_QFQ_WEIGHT,
768*4882a593Smuzhiyun 	TCA_QFQ_LMAX,
769*4882a593Smuzhiyun 	__TCA_QFQ_MAX
770*4882a593Smuzhiyun };
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun #define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun struct tc_qfq_stats {
775*4882a593Smuzhiyun 	__u32 weight;
776*4882a593Smuzhiyun 	__u32 lmax;
777*4882a593Smuzhiyun };
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun /* CODEL */
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun enum {
782*4882a593Smuzhiyun 	TCA_CODEL_UNSPEC,
783*4882a593Smuzhiyun 	TCA_CODEL_TARGET,
784*4882a593Smuzhiyun 	TCA_CODEL_LIMIT,
785*4882a593Smuzhiyun 	TCA_CODEL_INTERVAL,
786*4882a593Smuzhiyun 	TCA_CODEL_ECN,
787*4882a593Smuzhiyun 	TCA_CODEL_CE_THRESHOLD,
788*4882a593Smuzhiyun 	__TCA_CODEL_MAX
789*4882a593Smuzhiyun };
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun #define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun struct tc_codel_xstats {
794*4882a593Smuzhiyun 	__u32	maxpacket; /* largest packet we've seen so far */
795*4882a593Smuzhiyun 	__u32	count;	   /* how many drops we've done since the last time we
796*4882a593Smuzhiyun 			    * entered dropping state
797*4882a593Smuzhiyun 			    */
798*4882a593Smuzhiyun 	__u32	lastcount; /* count at entry to dropping state */
799*4882a593Smuzhiyun 	__u32	ldelay;    /* in-queue delay seen by most recently dequeued packet */
800*4882a593Smuzhiyun 	__s32	drop_next; /* time to drop next packet */
801*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of time max qdisc packet limit was hit */
802*4882a593Smuzhiyun 	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
803*4882a593Smuzhiyun 	__u32	dropping;  /* are we in dropping state ? */
804*4882a593Smuzhiyun 	__u32	ce_mark;   /* number of CE marked packets because of ce_threshold */
805*4882a593Smuzhiyun };
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun /* FQ_CODEL */
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun enum {
810*4882a593Smuzhiyun 	TCA_FQ_CODEL_UNSPEC,
811*4882a593Smuzhiyun 	TCA_FQ_CODEL_TARGET,
812*4882a593Smuzhiyun 	TCA_FQ_CODEL_LIMIT,
813*4882a593Smuzhiyun 	TCA_FQ_CODEL_INTERVAL,
814*4882a593Smuzhiyun 	TCA_FQ_CODEL_ECN,
815*4882a593Smuzhiyun 	TCA_FQ_CODEL_FLOWS,
816*4882a593Smuzhiyun 	TCA_FQ_CODEL_QUANTUM,
817*4882a593Smuzhiyun 	TCA_FQ_CODEL_CE_THRESHOLD,
818*4882a593Smuzhiyun 	TCA_FQ_CODEL_DROP_BATCH_SIZE,
819*4882a593Smuzhiyun 	TCA_FQ_CODEL_MEMORY_LIMIT,
820*4882a593Smuzhiyun 	__TCA_FQ_CODEL_MAX
821*4882a593Smuzhiyun };
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun #define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun enum {
826*4882a593Smuzhiyun 	TCA_FQ_CODEL_XSTATS_QDISC,
827*4882a593Smuzhiyun 	TCA_FQ_CODEL_XSTATS_CLASS,
828*4882a593Smuzhiyun };
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun struct tc_fq_codel_qd_stats {
831*4882a593Smuzhiyun 	__u32	maxpacket;	/* largest packet we've seen so far */
832*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of time max qdisc
833*4882a593Smuzhiyun 				 * packet limit was hit
834*4882a593Smuzhiyun 				 */
835*4882a593Smuzhiyun 	__u32	ecn_mark;	/* number of packets we ECN marked
836*4882a593Smuzhiyun 				 * instead of being dropped
837*4882a593Smuzhiyun 				 */
838*4882a593Smuzhiyun 	__u32	new_flow_count; /* number of time packets
839*4882a593Smuzhiyun 				 * created a 'new flow'
840*4882a593Smuzhiyun 				 */
841*4882a593Smuzhiyun 	__u32	new_flows_len;	/* count of flows in new list */
842*4882a593Smuzhiyun 	__u32	old_flows_len;	/* count of flows in old list */
843*4882a593Smuzhiyun 	__u32	ce_mark;	/* packets above ce_threshold */
844*4882a593Smuzhiyun 	__u32	memory_usage;	/* in bytes */
845*4882a593Smuzhiyun 	__u32	drop_overmemory;
846*4882a593Smuzhiyun };
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun struct tc_fq_codel_cl_stats {
849*4882a593Smuzhiyun 	__s32	deficit;
850*4882a593Smuzhiyun 	__u32	ldelay;		/* in-queue delay seen by most recently
851*4882a593Smuzhiyun 				 * dequeued packet
852*4882a593Smuzhiyun 				 */
853*4882a593Smuzhiyun 	__u32	count;
854*4882a593Smuzhiyun 	__u32	lastcount;
855*4882a593Smuzhiyun 	__u32	dropping;
856*4882a593Smuzhiyun 	__s32	drop_next;
857*4882a593Smuzhiyun };
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun struct tc_fq_codel_xstats {
860*4882a593Smuzhiyun 	__u32	type;
861*4882a593Smuzhiyun 	union {
862*4882a593Smuzhiyun 		struct tc_fq_codel_qd_stats qdisc_stats;
863*4882a593Smuzhiyun 		struct tc_fq_codel_cl_stats class_stats;
864*4882a593Smuzhiyun 	};
865*4882a593Smuzhiyun };
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun /* FQ */
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun enum {
870*4882a593Smuzhiyun 	TCA_FQ_UNSPEC,
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	TCA_FQ_QUANTUM,		/* RR quantum */
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	TCA_FQ_INITIAL_QUANTUM,		/* RR quantum for new flow */
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 	TCA_FQ_FLOW_REFILL_DELAY,	/* flow credit refill delay in usec */
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun 	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 	TCA_FQ_CE_THRESHOLD,	/* DCTCP-like CE-marking threshold */
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	__TCA_FQ_MAX
897*4882a593Smuzhiyun };
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun #define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun struct tc_fq_qd_stats {
902*4882a593Smuzhiyun 	__u64	gc_flows;
903*4882a593Smuzhiyun 	__u64	highprio_packets;
904*4882a593Smuzhiyun 	__u64	tcp_retrans;
905*4882a593Smuzhiyun 	__u64	throttled;
906*4882a593Smuzhiyun 	__u64	flows_plimit;
907*4882a593Smuzhiyun 	__u64	pkts_too_long;
908*4882a593Smuzhiyun 	__u64	allocation_errors;
909*4882a593Smuzhiyun 	__s64	time_next_delayed_flow;
910*4882a593Smuzhiyun 	__u32	flows;
911*4882a593Smuzhiyun 	__u32	inactive_flows;
912*4882a593Smuzhiyun 	__u32	throttled_flows;
913*4882a593Smuzhiyun 	__u32	unthrottle_latency_ns;
914*4882a593Smuzhiyun 	__u64	ce_mark;		/* packets above ce_threshold */
915*4882a593Smuzhiyun };
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun /* Heavy-Hitter Filter */
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun enum {
920*4882a593Smuzhiyun 	TCA_HHF_UNSPEC,
921*4882a593Smuzhiyun 	TCA_HHF_BACKLOG_LIMIT,
922*4882a593Smuzhiyun 	TCA_HHF_QUANTUM,
923*4882a593Smuzhiyun 	TCA_HHF_HH_FLOWS_LIMIT,
924*4882a593Smuzhiyun 	TCA_HHF_RESET_TIMEOUT,
925*4882a593Smuzhiyun 	TCA_HHF_ADMIT_BYTES,
926*4882a593Smuzhiyun 	TCA_HHF_EVICT_TIMEOUT,
927*4882a593Smuzhiyun 	TCA_HHF_NON_HH_WEIGHT,
928*4882a593Smuzhiyun 	__TCA_HHF_MAX
929*4882a593Smuzhiyun };
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun #define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun struct tc_hhf_xstats {
934*4882a593Smuzhiyun 	__u32	drop_overlimit; /* number of times max qdisc packet limit
935*4882a593Smuzhiyun 				 * was hit
936*4882a593Smuzhiyun 				 */
937*4882a593Smuzhiyun 	__u32	hh_overlimit;   /* number of times max heavy-hitters was hit */
938*4882a593Smuzhiyun 	__u32	hh_tot_count;   /* number of captured heavy-hitters so far */
939*4882a593Smuzhiyun 	__u32	hh_cur_count;   /* number of current heavy-hitters */
940*4882a593Smuzhiyun };
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun /* PIE */
943*4882a593Smuzhiyun enum {
944*4882a593Smuzhiyun 	TCA_PIE_UNSPEC,
945*4882a593Smuzhiyun 	TCA_PIE_TARGET,
946*4882a593Smuzhiyun 	TCA_PIE_LIMIT,
947*4882a593Smuzhiyun 	TCA_PIE_TUPDATE,
948*4882a593Smuzhiyun 	TCA_PIE_ALPHA,
949*4882a593Smuzhiyun 	TCA_PIE_BETA,
950*4882a593Smuzhiyun 	TCA_PIE_ECN,
951*4882a593Smuzhiyun 	TCA_PIE_BYTEMODE,
952*4882a593Smuzhiyun 	__TCA_PIE_MAX
953*4882a593Smuzhiyun };
954*4882a593Smuzhiyun #define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun struct tc_pie_xstats {
957*4882a593Smuzhiyun 	__u32 prob;             /* current probability */
958*4882a593Smuzhiyun 	__u32 delay;            /* current delay in ms */
959*4882a593Smuzhiyun 	__u32 avg_dq_rate;      /* current average dq_rate in bits/pie_time */
960*4882a593Smuzhiyun 	__u32 packets_in;       /* total number of packets enqueued */
961*4882a593Smuzhiyun 	__u32 dropped;          /* packets dropped due to pie_action */
962*4882a593Smuzhiyun 	__u32 overlimit;        /* dropped due to lack of space in queue */
963*4882a593Smuzhiyun 	__u32 maxq;             /* maximum queue size */
964*4882a593Smuzhiyun 	__u32 ecn_mark;         /* packets marked with ecn*/
965*4882a593Smuzhiyun };
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun /* CBS */
968*4882a593Smuzhiyun struct tc_cbs_qopt {
969*4882a593Smuzhiyun 	__u8 offload;
970*4882a593Smuzhiyun 	__u8 _pad[3];
971*4882a593Smuzhiyun 	__s32 hicredit;
972*4882a593Smuzhiyun 	__s32 locredit;
973*4882a593Smuzhiyun 	__s32 idleslope;
974*4882a593Smuzhiyun 	__s32 sendslope;
975*4882a593Smuzhiyun };
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun enum {
978*4882a593Smuzhiyun 	TCA_CBS_UNSPEC,
979*4882a593Smuzhiyun 	TCA_CBS_PARMS,
980*4882a593Smuzhiyun 	__TCA_CBS_MAX,
981*4882a593Smuzhiyun };
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun /* ETF */
987*4882a593Smuzhiyun struct tc_etf_qopt {
988*4882a593Smuzhiyun 	__s32 delta;
989*4882a593Smuzhiyun 	__s32 clockid;
990*4882a593Smuzhiyun 	__u32 flags;
991*4882a593Smuzhiyun #define TC_ETF_DEADLINE_MODE_ON	BIT(0)
992*4882a593Smuzhiyun #define TC_ETF_OFFLOAD_ON	BIT(1)
993*4882a593Smuzhiyun };
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun enum {
996*4882a593Smuzhiyun 	TCA_ETF_UNSPEC,
997*4882a593Smuzhiyun 	TCA_ETF_PARMS,
998*4882a593Smuzhiyun 	__TCA_ETF_MAX,
999*4882a593Smuzhiyun };
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun #define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun /* CAKE */
1005*4882a593Smuzhiyun enum {
1006*4882a593Smuzhiyun 	TCA_CAKE_UNSPEC,
1007*4882a593Smuzhiyun 	TCA_CAKE_PAD,
1008*4882a593Smuzhiyun 	TCA_CAKE_BASE_RATE64,
1009*4882a593Smuzhiyun 	TCA_CAKE_DIFFSERV_MODE,
1010*4882a593Smuzhiyun 	TCA_CAKE_ATM,
1011*4882a593Smuzhiyun 	TCA_CAKE_FLOW_MODE,
1012*4882a593Smuzhiyun 	TCA_CAKE_OVERHEAD,
1013*4882a593Smuzhiyun 	TCA_CAKE_RTT,
1014*4882a593Smuzhiyun 	TCA_CAKE_TARGET,
1015*4882a593Smuzhiyun 	TCA_CAKE_AUTORATE,
1016*4882a593Smuzhiyun 	TCA_CAKE_MEMORY,
1017*4882a593Smuzhiyun 	TCA_CAKE_NAT,
1018*4882a593Smuzhiyun 	TCA_CAKE_RAW,
1019*4882a593Smuzhiyun 	TCA_CAKE_WASH,
1020*4882a593Smuzhiyun 	TCA_CAKE_MPU,
1021*4882a593Smuzhiyun 	TCA_CAKE_INGRESS,
1022*4882a593Smuzhiyun 	TCA_CAKE_ACK_FILTER,
1023*4882a593Smuzhiyun 	TCA_CAKE_SPLIT_GSO,
1024*4882a593Smuzhiyun 	__TCA_CAKE_MAX
1025*4882a593Smuzhiyun };
1026*4882a593Smuzhiyun #define TCA_CAKE_MAX	(__TCA_CAKE_MAX - 1)
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun enum {
1029*4882a593Smuzhiyun 	__TCA_CAKE_STATS_INVALID,
1030*4882a593Smuzhiyun 	TCA_CAKE_STATS_PAD,
1031*4882a593Smuzhiyun 	TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
1032*4882a593Smuzhiyun 	TCA_CAKE_STATS_MEMORY_LIMIT,
1033*4882a593Smuzhiyun 	TCA_CAKE_STATS_MEMORY_USED,
1034*4882a593Smuzhiyun 	TCA_CAKE_STATS_AVG_NETOFF,
1035*4882a593Smuzhiyun 	TCA_CAKE_STATS_MIN_NETLEN,
1036*4882a593Smuzhiyun 	TCA_CAKE_STATS_MAX_NETLEN,
1037*4882a593Smuzhiyun 	TCA_CAKE_STATS_MIN_ADJLEN,
1038*4882a593Smuzhiyun 	TCA_CAKE_STATS_MAX_ADJLEN,
1039*4882a593Smuzhiyun 	TCA_CAKE_STATS_TIN_STATS,
1040*4882a593Smuzhiyun 	TCA_CAKE_STATS_DEFICIT,
1041*4882a593Smuzhiyun 	TCA_CAKE_STATS_COBALT_COUNT,
1042*4882a593Smuzhiyun 	TCA_CAKE_STATS_DROPPING,
1043*4882a593Smuzhiyun 	TCA_CAKE_STATS_DROP_NEXT_US,
1044*4882a593Smuzhiyun 	TCA_CAKE_STATS_P_DROP,
1045*4882a593Smuzhiyun 	TCA_CAKE_STATS_BLUE_TIMER_US,
1046*4882a593Smuzhiyun 	__TCA_CAKE_STATS_MAX
1047*4882a593Smuzhiyun };
1048*4882a593Smuzhiyun #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun enum {
1051*4882a593Smuzhiyun 	__TCA_CAKE_TIN_STATS_INVALID,
1052*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_PAD,
1053*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SENT_PACKETS,
1054*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SENT_BYTES64,
1055*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
1056*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
1057*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
1058*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
1059*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
1060*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
1061*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
1062*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
1063*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
1064*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_TARGET_US,
1065*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_INTERVAL_US,
1066*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
1067*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_MISSES,
1068*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
1069*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
1070*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_AVG_DELAY_US,
1071*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BASE_DELAY_US,
1072*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
1073*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_BULK_FLOWS,
1074*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
1075*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_MAX_SKBLEN,
1076*4882a593Smuzhiyun 	TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
1077*4882a593Smuzhiyun 	__TCA_CAKE_TIN_STATS_MAX
1078*4882a593Smuzhiyun };
1079*4882a593Smuzhiyun #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
1080*4882a593Smuzhiyun #define TC_CAKE_MAX_TINS (8)
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun enum {
1083*4882a593Smuzhiyun 	CAKE_FLOW_NONE = 0,
1084*4882a593Smuzhiyun 	CAKE_FLOW_SRC_IP,
1085*4882a593Smuzhiyun 	CAKE_FLOW_DST_IP,
1086*4882a593Smuzhiyun 	CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
1087*4882a593Smuzhiyun 	CAKE_FLOW_FLOWS,
1088*4882a593Smuzhiyun 	CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
1089*4882a593Smuzhiyun 	CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
1090*4882a593Smuzhiyun 	CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
1091*4882a593Smuzhiyun 	CAKE_FLOW_MAX,
1092*4882a593Smuzhiyun };
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun enum {
1095*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV3 = 0,
1096*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV4,
1097*4882a593Smuzhiyun 	CAKE_DIFFSERV_DIFFSERV8,
1098*4882a593Smuzhiyun 	CAKE_DIFFSERV_BESTEFFORT,
1099*4882a593Smuzhiyun 	CAKE_DIFFSERV_PRECEDENCE,
1100*4882a593Smuzhiyun 	CAKE_DIFFSERV_MAX
1101*4882a593Smuzhiyun };
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun enum {
1104*4882a593Smuzhiyun 	CAKE_ACK_NONE = 0,
1105*4882a593Smuzhiyun 	CAKE_ACK_FILTER,
1106*4882a593Smuzhiyun 	CAKE_ACK_AGGRESSIVE,
1107*4882a593Smuzhiyun 	CAKE_ACK_MAX
1108*4882a593Smuzhiyun };
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun enum {
1111*4882a593Smuzhiyun 	CAKE_ATM_NONE = 0,
1112*4882a593Smuzhiyun 	CAKE_ATM_ATM,
1113*4882a593Smuzhiyun 	CAKE_ATM_PTM,
1114*4882a593Smuzhiyun 	CAKE_ATM_MAX
1115*4882a593Smuzhiyun };
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun /* TAPRIO */
1119*4882a593Smuzhiyun enum {
1120*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_GATES = 0x00,
1121*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
1122*4882a593Smuzhiyun 	TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
1123*4882a593Smuzhiyun };
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun enum {
1126*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
1127*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
1128*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
1129*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
1130*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
1131*4882a593Smuzhiyun 	__TCA_TAPRIO_SCHED_ENTRY_MAX,
1132*4882a593Smuzhiyun };
1133*4882a593Smuzhiyun #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun /* The format for schedule entry list is:
1136*4882a593Smuzhiyun  * [TCA_TAPRIO_SCHED_ENTRY_LIST]
1137*4882a593Smuzhiyun  *   [TCA_TAPRIO_SCHED_ENTRY]
1138*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
1139*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_GATES]
1140*4882a593Smuzhiyun  *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
1141*4882a593Smuzhiyun  */
1142*4882a593Smuzhiyun enum {
1143*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_UNSPEC,
1144*4882a593Smuzhiyun 	TCA_TAPRIO_SCHED_ENTRY,
1145*4882a593Smuzhiyun 	__TCA_TAPRIO_SCHED_MAX,
1146*4882a593Smuzhiyun };
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun enum {
1151*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_UNSPEC,
1152*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
1153*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
1154*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
1155*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
1156*4882a593Smuzhiyun 	TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
1157*4882a593Smuzhiyun 	TCA_TAPRIO_PAD,
1158*4882a593Smuzhiyun 	__TCA_TAPRIO_ATTR_MAX,
1159*4882a593Smuzhiyun };
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun #endif
1164