/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;
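
/*
 * Each TRACE_EVENT() below expands to a trace_<event>() helper with the
 * TP_PROTO() signature for the kernel to call, plus a tracefs event that
 * userspace can consume. Illustrative ways to watch these events (the
 * paths assume tracefs is mounted in its usual location):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * or, equivalently, with perf:
 *
 *	perf record -e 'io_uring:*' -- <workload>
 */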

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context,
 * which can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
			  __entry->ctx, __entry->fd, __entry->sq_entries,
			  __entry->cq_entries, __entry->flags)
);
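
/*
 * Invocation sketch (illustrative, not part of this header): the core is
 * expected to call the generated helper right after ring setup, e.g. with
 * hypothetical locals 'ret' (the installed fd), 'ctx', and io_uring_params
 * 'p':
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries,
 *			      p->flags);
 */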

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 * 		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @eventfd:	whether an eventfd is registered or not
 * @ret:	return code
 *
 * Allows tracing fixed files/buffers/eventfds, which can be registered to
 * avoid the overhead of getting references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
			 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  unsigned,	opcode		)
		__field(  unsigned,	nr_files	)
		__field(  unsigned,	nr_bufs		)
		__field(  bool,		eventfd		)
		__field(  long,		ret		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->eventfd	= eventfd;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
			  "eventfd %d, ret %ld",
			  __entry->ctx, __entry->opcode, __entry->nr_files,
			  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);
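
/*
 * Note: @opcode corresponds to the IORING_(UN)REGISTER_* constants from
 * <uapi/linux/io_uring.h> (e.g. IORING_REGISTER_BUFFERS,
 * IORING_REGISTER_FILES, IORING_REGISTER_EVENTFD), which is how a decoded
 * trace can tell what was (un)registered. Call-site sketch (illustrative,
 * locals are hypothetical):
 *
 *	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
 *				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL,
 *				ret);
 */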

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx:	pointer to a ring context structure
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out if it makes sense to use fixed files, or check that fixed
 * files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  int,		fd	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fd	= fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);
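
/*
 * The per-operation get can be avoided with fixed files: register them once
 * and mark each SQE accordingly. Userspace sketch using liburing
 * (illustrative):
 *
 *	io_uring_register_files(&ring, fds, nr_fds);
 *	...
 *	sqe->flags |= IOSQE_FIXED_FILE;
 */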

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:	pointer to a ring context structure
 * @rw:		type of workqueue, hashed or normal
 * @req:	pointer to a submitted request
 * @work:	pointer to a submitted io_wq_work
 * @flags:	request flags
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
			 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  int,			rw	)
		__field(  void *,		req	)
		__field(  struct io_wq_work *,	work	)
		__field(  unsigned int,		flags	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->rw	= rw;
		__entry->req	= req;
		__entry->work	= work;
		__entry->flags	= flags;
	),

	TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
			  __entry->ctx, __entry->req, __entry->flags,
			  __entry->rw ? "hashed" : "normal", __entry->work)
);
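
/*
 * Invocation sketch (illustrative): @rw distinguishes hashed from normal
 * work; a call site would plausibly derive it from the work item itself.
 * io_wq_is_hashed() is the io-wq helper for this in contemporaneous trees,
 * named here as an assumption:
 *
 *	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work),
 *					req, &req->work, req->flags);
 */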

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a deferred request
 * @user_data:	user data associated with the request
 *
 * Allows tracking deferred requests, to get an insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data),

	TP_ARGS(ctx, req, user_data),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->req	= req;
		__entry->data	= user_data;
	),

	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
			__entry->req, __entry->data)
);

/**
 * io_uring_link - called before the io_uring request is added into the
 * 		   link_list of another request
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a linked request
 * @target_req:	pointer to a previous request, that would contain @req
 *
 * Allows tracking linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
			  __entry->ctx, __entry->req, __entry->target_req)
);
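
/*
 * Call-site sketch (illustrative): emitted while an IOSQE_IO_LINK chain is
 * being built, with a hypothetical 'head' being the request that @req gets
 * appended to:
 *
 *	trace_io_uring_link(ctx, req, head);
 */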

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking waits for a CQE, so that we can e.g. troubleshoot
 * situations where an application waits for an event that never comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
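
/*
 * Invocation sketch (illustrative), from the io_uring_enter(2)
 * IORING_ENTER_GETEVENTS wait path:
 *
 *	trace_io_uring_cqring_wait(ctx, min_events);
 */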

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,	req	)
		__field(  void *,	link	)
	),

	TP_fast_assign(
		__entry->req	= req;
		__entry->link	= link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	result of the request
 * @cflags:	completion flags
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),

	TP_ARGS(ctx, user_data, res, cflags),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u64,		user_data	)
		__field(  int,		res		)
		__field(  unsigned,	cflags		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
	),

	TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
			  __entry->ctx, (unsigned long long)__entry->user_data,
			  __entry->res, __entry->cflags)
);
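
/*
 * Invocation sketch (illustrative): emitted once per CQE as it is posted,
 * mirroring what userspace will read from the CQ ring:
 *
 *	trace_io_uring_complete(ctx, req->user_data, res, cflags);
 */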

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a submitted request
 * @opcode:		opcode of request
 * @user_data:		user data associated with the request
 * @flags:		request flags
 * @force_nonblock:	true if the submission must not block
 * @sq_thread:		true if sq_thread has submitted this SQE
 *
 * Allows tracking SQE submission, to understand what its source was:
 * the SQ thread or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
		 bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  u32,		flags		)
		__field(  bool,		force_nonblock	)
		__field(  bool,		sq_thread	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->flags		= flags;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->opcode, (unsigned long long)__entry->user_data,
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);
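
/*
 * Invocation sketch (illustrative): issued once per SQE consumed, whether
 * via io_uring_enter(2) or via the SQPOLL thread; 'force_nonblock' would be
 * true on the inline (non-blocking) submission attempt:
 *
 *	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
 *				  req->flags, force_nonblock, sq_thread);
 */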

/**
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to the armed request
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 *
 * Allows tracking which fds are being waited on and what the events of
 * interest are.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
		 int mask, int events),

	TP_ARGS(ctx, req, opcode, user_data, mask, events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
		__field(  int,		events		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
		__entry->events		= events;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask, __entry->events)
);
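
/*
 * Invocation sketch (illustrative): armed from the poll path, with a
 * hypothetical 'mask' being the currently signalled events and
 * 'poll->events' those registered for the wait:
 *
 *	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
 *				mask, poll->events);
 */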

/**
 * io_uring_poll_wake - called on a wakeup of a registered poll wait
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	triggered events mask
 *
 * Allows tracking poll wakeups and pairing them with io_uring_poll_arm.
 */
TRACE_EVENT(io_uring_poll_wake,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->mask)
);

/**
 * io_uring_task_add - called when a request is queued to task_work,
 * 		       e.g. after a poll wakeup
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 * @mask:	triggered events mask
 *
 * Allows tracking the hand-off of completion work to the submitting task.
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
		__field(  int,		mask		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
		__entry->mask		= mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask %x",
			  __entry->ctx, __entry->opcode,
			  (unsigned long long) __entry->user_data,
			  __entry->mask)
);

/**
 * io_uring_task_run - called when task_work_run() executes the poll events
 *                     notification callbacks
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to the armed request
 * @opcode:	opcode of request
 * @user_data:	user data associated with the request
 *
 * Allows tracking when notified poll events are processed.
 */
TRACE_EVENT(io_uring_task_run,

	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),

	TP_ARGS(ctx, req, opcode, user_data),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u8,		opcode		)
		__field(  u64,		user_data	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->opcode		= opcode;
		__entry->user_data	= user_data;
	),

	TP_printk("ring %p, req %p, op %d, data 0x%llx",
		  __entry->ctx, __entry->req, __entry->opcode,
		  (unsigned long long) __entry->user_data)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>