xref: /OK3568_Linux_fs/kernel/include/trace/events/xdp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #undef TRACE_SYSTEM
3*4882a593Smuzhiyun #define TRACE_SYSTEM xdp
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6*4882a593Smuzhiyun #define _TRACE_XDP_H
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/netdevice.h>
9*4882a593Smuzhiyun #include <linux/filter.h>
10*4882a593Smuzhiyun #include <linux/tracepoint.h>
11*4882a593Smuzhiyun #include <linux/bpf.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #define __XDP_ACT_MAP(FN)	\
14*4882a593Smuzhiyun 	FN(ABORTED)		\
15*4882a593Smuzhiyun 	FN(DROP)		\
16*4882a593Smuzhiyun 	FN(PASS)		\
17*4882a593Smuzhiyun 	FN(TX)			\
18*4882a593Smuzhiyun 	FN(REDIRECT)
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define __XDP_ACT_TP_FN(x)	\
21*4882a593Smuzhiyun 	TRACE_DEFINE_ENUM(XDP_##x);
22*4882a593Smuzhiyun #define __XDP_ACT_SYM_FN(x)	\
23*4882a593Smuzhiyun 	{ XDP_##x, #x },
24*4882a593Smuzhiyun #define __XDP_ACT_SYM_TAB	\
25*4882a593Smuzhiyun 	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
26*4882a593Smuzhiyun __XDP_ACT_MAP(__XDP_ACT_TP_FN)
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun TRACE_EVENT(xdp_exception,
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
31*4882a593Smuzhiyun 		 const struct bpf_prog *xdp, u32 act),
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, act),
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	TP_STRUCT__entry(
36*4882a593Smuzhiyun 		__field(int, prog_id)
37*4882a593Smuzhiyun 		__field(u32, act)
38*4882a593Smuzhiyun 		__field(int, ifindex)
39*4882a593Smuzhiyun 	),
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	TP_fast_assign(
42*4882a593Smuzhiyun 		__entry->prog_id	= xdp->aux->id;
43*4882a593Smuzhiyun 		__entry->act		= act;
44*4882a593Smuzhiyun 		__entry->ifindex	= dev->ifindex;
45*4882a593Smuzhiyun 	),
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	TP_printk("prog_id=%d action=%s ifindex=%d",
48*4882a593Smuzhiyun 		  __entry->prog_id,
49*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
50*4882a593Smuzhiyun 		  __entry->ifindex)
51*4882a593Smuzhiyun );
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun TRACE_EVENT(xdp_bulk_tx,
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
56*4882a593Smuzhiyun 		 int sent, int drops, int err),
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	TP_ARGS(dev, sent, drops, err),
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	TP_STRUCT__entry(
61*4882a593Smuzhiyun 		__field(int, ifindex)
62*4882a593Smuzhiyun 		__field(u32, act)
63*4882a593Smuzhiyun 		__field(int, drops)
64*4882a593Smuzhiyun 		__field(int, sent)
65*4882a593Smuzhiyun 		__field(int, err)
66*4882a593Smuzhiyun 	),
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	TP_fast_assign(
69*4882a593Smuzhiyun 		__entry->ifindex	= dev->ifindex;
70*4882a593Smuzhiyun 		__entry->act		= XDP_TX;
71*4882a593Smuzhiyun 		__entry->drops		= drops;
72*4882a593Smuzhiyun 		__entry->sent		= sent;
73*4882a593Smuzhiyun 		__entry->err		= err;
74*4882a593Smuzhiyun 	),
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
77*4882a593Smuzhiyun 		  __entry->ifindex,
78*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79*4882a593Smuzhiyun 		  __entry->sent, __entry->drops, __entry->err)
80*4882a593Smuzhiyun );
81*4882a593Smuzhiyun 
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/*
 * Mirror of the leading member of the devmap's private netdev wrapper,
 * so this header can reach the target net_device without including the
 * devmap internals.  Layout must stay in sync with the real struct.
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

/*
 * Resolve the destination ifindex for a redirect target: only DEVMAP
 * and DEVMAP_HASH targets wrap a net_device; other map types yield 0.
 * Arguments are parenthesized so expression arguments expand safely.
 */
#define devmap_ifindex(tgt, map)				\
	((((map)->map_type == BPF_MAP_TYPE_DEVMAP ||	\
		  (map)->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
	  ((struct _bpf_dtab_netdev *)(tgt))->dev->ifindex : 0)
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun DECLARE_EVENT_CLASS(xdp_redirect_template,
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
97*4882a593Smuzhiyun 		 const struct bpf_prog *xdp,
98*4882a593Smuzhiyun 		 const void *tgt, int err,
99*4882a593Smuzhiyun 		 const struct bpf_map *map, u32 index),
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, tgt, err, map, index),
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	TP_STRUCT__entry(
104*4882a593Smuzhiyun 		__field(int, prog_id)
105*4882a593Smuzhiyun 		__field(u32, act)
106*4882a593Smuzhiyun 		__field(int, ifindex)
107*4882a593Smuzhiyun 		__field(int, err)
108*4882a593Smuzhiyun 		__field(int, to_ifindex)
109*4882a593Smuzhiyun 		__field(u32, map_id)
110*4882a593Smuzhiyun 		__field(int, map_index)
111*4882a593Smuzhiyun 	),
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	TP_fast_assign(
114*4882a593Smuzhiyun 		__entry->prog_id	= xdp->aux->id;
115*4882a593Smuzhiyun 		__entry->act		= XDP_REDIRECT;
116*4882a593Smuzhiyun 		__entry->ifindex	= dev->ifindex;
117*4882a593Smuzhiyun 		__entry->err		= err;
118*4882a593Smuzhiyun 		__entry->to_ifindex	= map ? devmap_ifindex(tgt, map) :
119*4882a593Smuzhiyun 						index;
120*4882a593Smuzhiyun 		__entry->map_id		= map ? map->id : 0;
121*4882a593Smuzhiyun 		__entry->map_index	= map ? index : 0;
122*4882a593Smuzhiyun 	),
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
125*4882a593Smuzhiyun 		  " map_id=%d map_index=%d",
126*4882a593Smuzhiyun 		  __entry->prog_id,
127*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
128*4882a593Smuzhiyun 		  __entry->ifindex, __entry->to_ifindex,
129*4882a593Smuzhiyun 		  __entry->err, __entry->map_id, __entry->map_index)
130*4882a593Smuzhiyun );
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
133*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
134*4882a593Smuzhiyun 		 const struct bpf_prog *xdp,
135*4882a593Smuzhiyun 		 const void *tgt, int err,
136*4882a593Smuzhiyun 		 const struct bpf_map *map, u32 index),
137*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, tgt, err, map, index)
138*4882a593Smuzhiyun );
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
141*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
142*4882a593Smuzhiyun 		 const struct bpf_prog *xdp,
143*4882a593Smuzhiyun 		 const void *tgt, int err,
144*4882a593Smuzhiyun 		 const struct bpf_map *map, u32 index),
145*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, tgt, err, map, index)
146*4882a593Smuzhiyun );
147*4882a593Smuzhiyun 
/*
 * Convenience wrappers used by the XDP core: map-less variants pass
 * NULL for tgt/map with "to" as the destination ifindex; map variants
 * forward the target object and map index unchanged.
 */
#define _trace_xdp_redirect(dev, xdp, to)		\
	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);

#define _trace_xdp_redirect_err(dev, xdp, to, err)	\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);

#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
	 trace_xdp_redirect(dev, xdp, to, 0, map, index);

#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
	 trace_xdp_redirect_err(dev, xdp, to, err, map, index);
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun /* not used anymore, but kept around so as not to break old programs */
161*4882a593Smuzhiyun DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
162*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
163*4882a593Smuzhiyun 		 const struct bpf_prog *xdp,
164*4882a593Smuzhiyun 		 const void *tgt, int err,
165*4882a593Smuzhiyun 		 const struct bpf_map *map, u32 index),
166*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, tgt, err, map, index)
167*4882a593Smuzhiyun );
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
170*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *dev,
171*4882a593Smuzhiyun 		 const struct bpf_prog *xdp,
172*4882a593Smuzhiyun 		 const void *tgt, int err,
173*4882a593Smuzhiyun 		 const struct bpf_map *map, u32 index),
174*4882a593Smuzhiyun 	TP_ARGS(dev, xdp, tgt, err, map, index)
175*4882a593Smuzhiyun );
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun TRACE_EVENT(xdp_cpumap_kthread,
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
180*4882a593Smuzhiyun 		 int sched, struct xdp_cpumap_stats *xdp_stats),
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	TP_ARGS(map_id, processed, drops, sched, xdp_stats),
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	TP_STRUCT__entry(
185*4882a593Smuzhiyun 		__field(int, map_id)
186*4882a593Smuzhiyun 		__field(u32, act)
187*4882a593Smuzhiyun 		__field(int, cpu)
188*4882a593Smuzhiyun 		__field(unsigned int, drops)
189*4882a593Smuzhiyun 		__field(unsigned int, processed)
190*4882a593Smuzhiyun 		__field(int, sched)
191*4882a593Smuzhiyun 		__field(unsigned int, xdp_pass)
192*4882a593Smuzhiyun 		__field(unsigned int, xdp_drop)
193*4882a593Smuzhiyun 		__field(unsigned int, xdp_redirect)
194*4882a593Smuzhiyun 	),
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	TP_fast_assign(
197*4882a593Smuzhiyun 		__entry->map_id		= map_id;
198*4882a593Smuzhiyun 		__entry->act		= XDP_REDIRECT;
199*4882a593Smuzhiyun 		__entry->cpu		= smp_processor_id();
200*4882a593Smuzhiyun 		__entry->drops		= drops;
201*4882a593Smuzhiyun 		__entry->processed	= processed;
202*4882a593Smuzhiyun 		__entry->sched	= sched;
203*4882a593Smuzhiyun 		__entry->xdp_pass	= xdp_stats->pass;
204*4882a593Smuzhiyun 		__entry->xdp_drop	= xdp_stats->drop;
205*4882a593Smuzhiyun 		__entry->xdp_redirect	= xdp_stats->redirect;
206*4882a593Smuzhiyun 	),
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	TP_printk("kthread"
209*4882a593Smuzhiyun 		  " cpu=%d map_id=%d action=%s"
210*4882a593Smuzhiyun 		  " processed=%u drops=%u"
211*4882a593Smuzhiyun 		  " sched=%d"
212*4882a593Smuzhiyun 		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
213*4882a593Smuzhiyun 		  __entry->cpu, __entry->map_id,
214*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
215*4882a593Smuzhiyun 		  __entry->processed, __entry->drops,
216*4882a593Smuzhiyun 		  __entry->sched,
217*4882a593Smuzhiyun 		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
218*4882a593Smuzhiyun );
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun TRACE_EVENT(xdp_cpumap_enqueue,
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
223*4882a593Smuzhiyun 		 int to_cpu),
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	TP_ARGS(map_id, processed, drops, to_cpu),
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	TP_STRUCT__entry(
228*4882a593Smuzhiyun 		__field(int, map_id)
229*4882a593Smuzhiyun 		__field(u32, act)
230*4882a593Smuzhiyun 		__field(int, cpu)
231*4882a593Smuzhiyun 		__field(unsigned int, drops)
232*4882a593Smuzhiyun 		__field(unsigned int, processed)
233*4882a593Smuzhiyun 		__field(int, to_cpu)
234*4882a593Smuzhiyun 	),
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	TP_fast_assign(
237*4882a593Smuzhiyun 		__entry->map_id		= map_id;
238*4882a593Smuzhiyun 		__entry->act		= XDP_REDIRECT;
239*4882a593Smuzhiyun 		__entry->cpu		= smp_processor_id();
240*4882a593Smuzhiyun 		__entry->drops		= drops;
241*4882a593Smuzhiyun 		__entry->processed	= processed;
242*4882a593Smuzhiyun 		__entry->to_cpu		= to_cpu;
243*4882a593Smuzhiyun 	),
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	TP_printk("enqueue"
246*4882a593Smuzhiyun 		  " cpu=%d map_id=%d action=%s"
247*4882a593Smuzhiyun 		  " processed=%u drops=%u"
248*4882a593Smuzhiyun 		  " to_cpu=%d",
249*4882a593Smuzhiyun 		  __entry->cpu, __entry->map_id,
250*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
251*4882a593Smuzhiyun 		  __entry->processed, __entry->drops,
252*4882a593Smuzhiyun 		  __entry->to_cpu)
253*4882a593Smuzhiyun );
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun TRACE_EVENT(xdp_devmap_xmit,
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	TP_PROTO(const struct net_device *from_dev,
258*4882a593Smuzhiyun 		 const struct net_device *to_dev,
259*4882a593Smuzhiyun 		 int sent, int drops, int err),
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	TP_ARGS(from_dev, to_dev, sent, drops, err),
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	TP_STRUCT__entry(
264*4882a593Smuzhiyun 		__field(int, from_ifindex)
265*4882a593Smuzhiyun 		__field(u32, act)
266*4882a593Smuzhiyun 		__field(int, to_ifindex)
267*4882a593Smuzhiyun 		__field(int, drops)
268*4882a593Smuzhiyun 		__field(int, sent)
269*4882a593Smuzhiyun 		__field(int, err)
270*4882a593Smuzhiyun 	),
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	TP_fast_assign(
273*4882a593Smuzhiyun 		__entry->from_ifindex	= from_dev->ifindex;
274*4882a593Smuzhiyun 		__entry->act		= XDP_REDIRECT;
275*4882a593Smuzhiyun 		__entry->to_ifindex	= to_dev->ifindex;
276*4882a593Smuzhiyun 		__entry->drops		= drops;
277*4882a593Smuzhiyun 		__entry->sent		= sent;
278*4882a593Smuzhiyun 		__entry->err		= err;
279*4882a593Smuzhiyun 	),
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	TP_printk("ndo_xdp_xmit"
282*4882a593Smuzhiyun 		  " from_ifindex=%d to_ifindex=%d action=%s"
283*4882a593Smuzhiyun 		  " sent=%d drops=%d"
284*4882a593Smuzhiyun 		  " err=%d",
285*4882a593Smuzhiyun 		  __entry->from_ifindex, __entry->to_ifindex,
286*4882a593Smuzhiyun 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
287*4882a593Smuzhiyun 		  __entry->sent, __entry->drops,
288*4882a593Smuzhiyun 		  __entry->err)
289*4882a593Smuzhiyun );
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun /* Expect users already include <net/xdp.h>, but not xdp_priv.h */
292*4882a593Smuzhiyun #include <net/xdp_priv.h>
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun #define __MEM_TYPE_MAP(FN)	\
295*4882a593Smuzhiyun 	FN(PAGE_SHARED)		\
296*4882a593Smuzhiyun 	FN(PAGE_ORDER0)		\
297*4882a593Smuzhiyun 	FN(PAGE_POOL)		\
298*4882a593Smuzhiyun 	FN(XSK_BUFF_POOL)
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun #define __MEM_TYPE_TP_FN(x)	\
301*4882a593Smuzhiyun 	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
302*4882a593Smuzhiyun #define __MEM_TYPE_SYM_FN(x)	\
303*4882a593Smuzhiyun 	{ MEM_TYPE_##x, #x },
304*4882a593Smuzhiyun #define __MEM_TYPE_SYM_TAB	\
305*4882a593Smuzhiyun 	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
306*4882a593Smuzhiyun __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun TRACE_EVENT(mem_disconnect,
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	TP_PROTO(const struct xdp_mem_allocator *xa),
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	TP_ARGS(xa),
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	TP_STRUCT__entry(
315*4882a593Smuzhiyun 		__field(const struct xdp_mem_allocator *,	xa)
316*4882a593Smuzhiyun 		__field(u32,		mem_id)
317*4882a593Smuzhiyun 		__field(u32,		mem_type)
318*4882a593Smuzhiyun 		__field(const void *,	allocator)
319*4882a593Smuzhiyun 	),
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	TP_fast_assign(
322*4882a593Smuzhiyun 		__entry->xa		= xa;
323*4882a593Smuzhiyun 		__entry->mem_id		= xa->mem.id;
324*4882a593Smuzhiyun 		__entry->mem_type	= xa->mem.type;
325*4882a593Smuzhiyun 		__entry->allocator	= xa->allocator;
326*4882a593Smuzhiyun 	),
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	TP_printk("mem_id=%d mem_type=%s allocator=%p",
329*4882a593Smuzhiyun 		  __entry->mem_id,
330*4882a593Smuzhiyun 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
331*4882a593Smuzhiyun 		  __entry->allocator
332*4882a593Smuzhiyun 	)
333*4882a593Smuzhiyun );
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun TRACE_EVENT(mem_connect,
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	TP_PROTO(const struct xdp_mem_allocator *xa,
338*4882a593Smuzhiyun 		 const struct xdp_rxq_info *rxq),
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	TP_ARGS(xa, rxq),
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	TP_STRUCT__entry(
343*4882a593Smuzhiyun 		__field(const struct xdp_mem_allocator *,	xa)
344*4882a593Smuzhiyun 		__field(u32,		mem_id)
345*4882a593Smuzhiyun 		__field(u32,		mem_type)
346*4882a593Smuzhiyun 		__field(const void *,	allocator)
347*4882a593Smuzhiyun 		__field(const struct xdp_rxq_info *,		rxq)
348*4882a593Smuzhiyun 		__field(int,		ifindex)
349*4882a593Smuzhiyun 	),
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	TP_fast_assign(
352*4882a593Smuzhiyun 		__entry->xa		= xa;
353*4882a593Smuzhiyun 		__entry->mem_id		= xa->mem.id;
354*4882a593Smuzhiyun 		__entry->mem_type	= xa->mem.type;
355*4882a593Smuzhiyun 		__entry->allocator	= xa->allocator;
356*4882a593Smuzhiyun 		__entry->rxq		= rxq;
357*4882a593Smuzhiyun 		__entry->ifindex	= rxq->dev->ifindex;
358*4882a593Smuzhiyun 	),
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	TP_printk("mem_id=%d mem_type=%s allocator=%p"
361*4882a593Smuzhiyun 		  " ifindex=%d",
362*4882a593Smuzhiyun 		  __entry->mem_id,
363*4882a593Smuzhiyun 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
364*4882a593Smuzhiyun 		  __entry->allocator,
365*4882a593Smuzhiyun 		  __entry->ifindex
366*4882a593Smuzhiyun 	)
367*4882a593Smuzhiyun );
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun TRACE_EVENT(mem_return_failed,
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	TP_PROTO(const struct xdp_mem_info *mem,
372*4882a593Smuzhiyun 		 const struct page *page),
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	TP_ARGS(mem, page),
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	TP_STRUCT__entry(
377*4882a593Smuzhiyun 		__field(const struct page *,	page)
378*4882a593Smuzhiyun 		__field(u32,		mem_id)
379*4882a593Smuzhiyun 		__field(u32,		mem_type)
380*4882a593Smuzhiyun 	),
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	TP_fast_assign(
383*4882a593Smuzhiyun 		__entry->page		= page;
384*4882a593Smuzhiyun 		__entry->mem_id		= mem->id;
385*4882a593Smuzhiyun 		__entry->mem_type	= mem->type;
386*4882a593Smuzhiyun 	),
387*4882a593Smuzhiyun 
388*4882a593Smuzhiyun 	TP_printk("mem_id=%d mem_type=%s page=%p",
389*4882a593Smuzhiyun 		  __entry->mem_id,
390*4882a593Smuzhiyun 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
391*4882a593Smuzhiyun 		  __entry->page
392*4882a593Smuzhiyun 	)
393*4882a593Smuzhiyun );
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun #endif /* _TRACE_XDP_H */
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun #include <trace/define_trace.h>
398