/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
	};
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
		__u32		rename_flags;
		__u32		unlink_flags;
		__u32		hardlink_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	union {
		__s32	splice_fd_in;
		__u32	file_index;
	};
	__u64	__pad2[2];
};
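
/*
 * Illustrative sketch (not part of the UAPI): preparing one SQE for a
 * plain read. Ring setup and the mmap(2) of the SQE array are omitted;
 * get_free_sqe() is a hypothetical helper returning a free slot in the
 * mapped SQE array.
 *
 *	struct io_uring_sqe *sqe = get_free_sqe();	// hypothetical helper
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	= IORING_OP_READ;
 *	sqe->fd		= fd;			// file to read from
 *	sqe->addr	= (unsigned long) buf;	// destination buffer
 *	sqe->len	= buf_len;		// bytes to read
 *	sqe->off	= 0;			// file offset
 *	sqe->user_data	= 0x1234;		// echoed back in the CQE
 */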

enum {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */

enum {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,
	IORING_OP_TEE,
	IORING_OP_SHUTDOWN,
	IORING_OP_RENAMEAT,
	IORING_OP_UNLINKAT,

	/* this goes last, obviously */
	IORING_OP_LAST,
};
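
/*
 * Illustrative sketch (not part of the UAPI): chaining two SQEs with
 * IOSQE_IO_LINK so the fsync only starts once the write completes; if
 * the write fails, the linked fsync is cancelled. Slot management is
 * omitted, and get_free_sqe() is a hypothetical helper.
 *
 *	struct io_uring_sqe *write_sqe = get_free_sqe();
 *	struct io_uring_sqe *fsync_sqe = get_free_sqe();
 *
 *	memset(write_sqe, 0, sizeof(*write_sqe));
 *	write_sqe->opcode = IORING_OP_WRITE;
 *	write_sqe->fd	  = fd;
 *	write_sqe->addr	  = (unsigned long) buf;
 *	write_sqe->len	  = buf_len;
 *	write_sqe->flags  = IOSQE_IO_LINK;	// chain to the next sqe
 *
 *	memset(fsync_sqe, 0, sizeof(*fsync_sqe));
 *	fsync_sqe->opcode = IORING_OP_FSYNC;
 *	fsync_sqe->fd	  = fd;
 */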

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)

/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 */
#define IORING_POLL_ADD_MULTI		(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 */
#define IORING_CQE_F_BUFFER	(1U << 0)
#define IORING_CQE_F_MORE	(1U << 1)

enum {
	IORING_CQE_BUFFER_SHIFT	= 16,
};

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING	0ULL
#define IORING_OFF_CQ_RING	0x8000000ULL
#define IORING_OFF_SQES		0x10000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32	head;
	__u32	tail;
	__u32	ring_mask;
	__u32	ring_entries;
	__u32	flags;
	__u32	dropped;
	__u32	array;
	__u32	resv1;
	__u64	resv2;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring is overflown */
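
/*
 * Illustrative sketch (not part of the UAPI): mapping the SQ ring after
 * io_uring_setup(2) has filled in "p" (a struct io_uring_params) and
 * returned "ring_fd". The SQ ring's size is derived from the reported
 * offsets, and the head/tail indices are located via io_sqring_offsets.
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq_ptr = mmap(0, sq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 *	unsigned *sq_head = sq_ptr + p.sq_off.head;
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 */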

struct io_cqring_offsets {
	__u32	head;
	__u32	tail;
	__u32	ring_mask;
	__u32	ring_entries;
	__u32	overflow;
	__u32	cqes;
	__u32	flags;
	__u32	resv1;
	__u64	resv2;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS	(1U << 0)
#define IORING_ENTER_SQ_WAKEUP	(1U << 1)
#define IORING_ENTER_SQ_WAIT	(1U << 2)
#define IORING_ENTER_EXT_ARG	(1U << 3)

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS		(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
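
/*
 * Illustrative sketch (not part of the UAPI): creating a ring with
 * io_uring_setup(2) and checking a feature bit afterwards. The queue
 * depth of 64 is an arbitrary choice for the example.
 *
 *	struct io_uring_params p;
 *	memset(&p, 0, sizeof(p));
 *	int ring_fd = syscall(__NR_io_uring_setup, 64, &p);
 *	if (ring_fd < 0)
 *		return -1;
 *	if (p.features & IORING_FEAT_SINGLE_MMAP) {
 *		// a single mmap(2) covers both the SQ and CQ rings
 *	}
 */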

/*
 * io_uring_register(2) opcodes and arguments
 */
enum {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* this goes last */
	IORING_REGISTER_LAST
};

/* io-wq worker categories */
enum {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 resv;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[0];
};
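
/*
 * Illustrative sketch (not part of the UAPI): probing which opcodes the
 * running kernel supports via IORING_REGISTER_PROBE. 256 slots covers
 * the full __u8 opcode space; "ring_fd" is an existing ring fd.
 *
 *	size_t sz = sizeof(struct io_uring_probe) +
 *		    256 * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, sz);
 *	if (syscall(__NR_io_uring_register, ring_fd,
 *		    IORING_REGISTER_PROBE, probe, 256) == 0 &&
 *	    probe->last_op >= IORING_OP_READ &&
 *	    (probe->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)) {
 *		// IORING_OP_READ is available on this kernel
 *	}
 */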

struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;	  /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;	  /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

/*
 * io_uring_restriction->opcode values
 */
enum {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	pad;
	__u64	ts;
};

#endif