Lines matching refs:sqe — every reference to the submission queue entry (struct io_uring_sqe) in fs/io_uring.c; the leading number on each line is the source line in that file, and the trailing "in fn()" names the enclosing function.

2901 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,  in io_prep_rw()  argument
2913 kiocb->ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
2924 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); in io_prep_rw()
2937 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
2962 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2977 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2978 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
3420 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3424 return io_prep_rw(req, sqe, READ); in io_read_prep()
3643 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3647 return io_prep_rw(req, sqe, WRITE); in io_write_prep()
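
Every field above is fetched with READ_ONCE(): the SQE lives in a ring mapped into userspace, which can rewrite it concurrently, so each field must be loaded exactly once and only the kernel's private copy validated and used. A minimal userspace sketch of that single-fetch discipline, with a volatile cast standing in for the kernel's READ_ONCE() (names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for READ_ONCE(): force a single, untorn load so
 * a concurrently-written field cannot be re-fetched between its
 * bounds check and its use (a classic double-fetch/TOCTOU bug). */
#define READ_ONCE_U32(x) (*(volatile const uint32_t *)&(x))

struct fake_sqe { uint32_t len; };

static int prep(struct fake_sqe *sqe, uint32_t *out_len)
{
	uint32_t len = READ_ONCE_U32(sqe->len);	/* fetch exactly once... */

	if (len > 4096)				/* ...bounds-check the copy... */
		return -1;
	*out_len = len;				/* ...and only ever use the copy */
	return 0;
}

int main(void)
{
	struct fake_sqe sqe = { .len = 512 };
	uint32_t len;

	if (prep(&sqe, &len) == 0)
		printf("accepted len=%u\n", len);
	return 0;
}
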
3753 const struct io_uring_sqe *sqe) in io_renameat_prep() argument
3760 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) in io_renameat_prep()
3765 ren->old_dfd = READ_ONCE(sqe->fd); in io_renameat_prep()
3766 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_renameat_prep()
3767 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_renameat_prep()
3768 ren->new_dfd = READ_ONCE(sqe->len); in io_renameat_prep()
3769 ren->flags = READ_ONCE(sqe->rename_flags); in io_renameat_prep()
3804 const struct io_uring_sqe *sqe) in io_unlinkat_prep() argument
3811 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || in io_unlinkat_prep()
3812 sqe->splice_fd_in) in io_unlinkat_prep()
3817 un->dfd = READ_ONCE(sqe->fd); in io_unlinkat_prep()
3819 un->flags = READ_ONCE(sqe->unlink_flags); in io_unlinkat_prep()
3823 fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_unlinkat_prep()
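
io_renameat_prep() and io_unlinkat_prep() open with a guard that recurs throughout this listing: every SQE field the opcode does not consume must be zero (else -EINVAL), which keeps those fields free for future extensions without silently changing behavior for old binaries. A standalone sketch of the idea; the struct layout is illustrative, not the real UAPI:

#include <errno.h>
#include <stdint.h>

/* Illustrative layout only -- not the real struct io_uring_sqe. */
struct sketch_sqe {
	uint64_t addr;
	uint64_t off;
	uint32_t len;
	uint16_t ioprio;
	uint16_t buf_index;
	uint32_t splice_fd_in;
};

/* Same shape as the io_unlinkat_prep() guard: every field this opcode
 * has no use for must be zero, or the whole request is rejected. */
static int sketch_unlink_prep(const struct sketch_sqe *sqe)
{
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->splice_fd_in)
		return -EINVAL;
	return 0;
}
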
3853 const struct io_uring_sqe *sqe) in io_shutdown_prep() argument
3858 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags || in io_shutdown_prep()
3859 sqe->buf_index || sqe->splice_fd_in)) in io_shutdown_prep()
3862 req->shutdown.how = READ_ONCE(sqe->len); in io_shutdown_prep()
3893 const struct io_uring_sqe *sqe) in __io_splice_prep() argument
3901 sp->len = READ_ONCE(sqe->len); in __io_splice_prep()
3902 sp->flags = READ_ONCE(sqe->splice_flags); in __io_splice_prep()
3905 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in); in __io_splice_prep()
3910 const struct io_uring_sqe *sqe) in io_tee_prep() argument
3912 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off)) in io_tee_prep()
3914 return __io_splice_prep(req, sqe); in io_tee_prep()
3947 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
3951 sp->off_in = READ_ONCE(sqe->splice_off_in); in io_splice_prep()
3952 sp->off_out = READ_ONCE(sqe->off); in io_splice_prep()
3953 return __io_splice_prep(req, sqe); in io_splice_prep()
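
Reading __io_splice_prep() together with its two callers shows where userspace must put each value: tee rejects any offset, while splice pulls off_in from sqe->splice_off_in and off_out from sqe->off. A hedged packing sketch for the splice case (liburing's io_uring_prep_splice() is the maintained equivalent):

#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>

/* Hand-packed splice SQE, mirroring what __io_splice_prep() and
 * io_splice_prep() read back out above. */
static void pack_splice_sqe(struct io_uring_sqe *sqe, int fd_in,
			    uint64_t off_in, int fd_out, uint64_t off_out,
			    unsigned int nbytes, unsigned int splice_flags)
{
	memset(sqe, 0, sizeof(*sqe));	/* unused fields must stay zero */
	sqe->opcode = IORING_OP_SPLICE;
	sqe->fd = fd_out;		/* the request's main file */
	sqe->off = off_out;		/* -> sp->off_out */
	sqe->splice_off_in = off_in;	/* -> sp->off_in */
	sqe->len = nbytes;		/* -> sp->len */
	sqe->splice_flags = splice_flags; /* -> sp->flags */
	sqe->splice_fd_in = fd_in;	/* -> sp->splice_fd_in */
}
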
4004 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
4010 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || in io_fsync_prep()
4011 sqe->splice_fd_in)) in io_fsync_prep()
4014 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4018 req->sync.off = READ_ONCE(sqe->off); in io_fsync_prep()
4019 req->sync.len = READ_ONCE(sqe->len); in io_fsync_prep()
4042 const struct io_uring_sqe *sqe) in io_fallocate_prep() argument
4044 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags || in io_fallocate_prep()
4045 sqe->splice_fd_in) in io_fallocate_prep()
4050 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
4051 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
4052 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
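
io_fallocate_prep() is a clear example of SQE field reuse: with no buffer to point at, the byte count arrives in sqe->addr and the fallocate mode in sqe->len, exactly as the READ_ONCE()s above show. A hedged sketch of the matching userspace packing (liburing wraps this as io_uring_prep_fallocate()):

#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>

static void pack_fallocate_sqe(struct io_uring_sqe *sqe, int fd, int mode,
			       uint64_t offset, uint64_t nbytes)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FALLOCATE;
	sqe->fd = fd;
	sqe->off = offset;	/* -> req->sync.off */
	sqe->addr = nbytes;	/* -> req->sync.len (no pointer here) */
	sqe->len = mode;	/* -> req->sync.mode */
}
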
4073 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
4080 if (unlikely(sqe->ioprio || sqe->buf_index)) in __io_openat_prep()
4089 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
4090 fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); in __io_openat_prep()
4098 req->open.file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
4107 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
4109 u64 mode = READ_ONCE(sqe->len); in io_openat_prep()
4110 u64 flags = READ_ONCE(sqe->open_flags); in io_openat_prep()
4113 return __io_openat_prep(req, sqe); in io_openat_prep()
4116 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
4122 how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_openat2_prep()
4123 len = READ_ONCE(sqe->len); in io_openat2_prep()
4132 return __io_openat_prep(req, sqe); in io_openat2_prep()
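
__io_openat_prep() and io_openat2_prep() show how pointers travel through the SQE: userspace stores them as u64 in addr/addr2 and the kernel converts back with u64_to_user_ptr(); for openat2, sqe->len carries sizeof(struct open_how) so the kernel can size-check the copy. A hedged packing sketch (path and how must stay live until the request completes):

#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>
#include <linux/openat2.h>

static void pack_openat2_sqe(struct io_uring_sqe *sqe, int dfd,
			     const char *path, struct open_how *how)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_OPENAT2;
	sqe->fd = dfd;				/* -> req->open.dfd */
	sqe->addr = (uint64_t)(uintptr_t)path;	/* -> fname */
	sqe->addr2 = (uint64_t)(uintptr_t)how;	/* -> how */
	sqe->len = sizeof(*how);		/* size-checked in the kernel */
}
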
4207 const struct io_uring_sqe *sqe) in io_remove_buffers_prep() argument
4212 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off || in io_remove_buffers_prep()
4213 sqe->splice_fd_in) in io_remove_buffers_prep()
4216 tmp = READ_ONCE(sqe->fd); in io_remove_buffers_prep()
4222 p->bgid = READ_ONCE(sqe->buf_group); in io_remove_buffers_prep()
4279 const struct io_uring_sqe *sqe) in io_provide_buffers_prep() argument
4285 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in) in io_provide_buffers_prep()
4288 tmp = READ_ONCE(sqe->fd); in io_provide_buffers_prep()
4292 p->addr = READ_ONCE(sqe->addr); in io_provide_buffers_prep()
4293 p->len = READ_ONCE(sqe->len); in io_provide_buffers_prep()
4305 p->bgid = READ_ONCE(sqe->buf_group); in io_provide_buffers_prep()
4306 tmp = READ_ONCE(sqe->off); in io_provide_buffers_prep()
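
io_provide_buffers_prep() repurposes fields even more aggressively: the buffer count rides in sqe->fd, the group ID in sqe->buf_group, and the starting buffer ID in sqe->off. A hedged packing sketch (liburing: io_uring_prep_provide_buffers()):

#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>

static void pack_provide_buffers_sqe(struct io_uring_sqe *sqe, void *addr,
				     int buf_len, int nbufs, int bgid, int bid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
	sqe->fd = nbufs;			/* -> p->nbufs (not an fd!) */
	sqe->addr = (uint64_t)(uintptr_t)addr;	/* -> p->addr */
	sqe->len = buf_len;			/* size of each buffer */
	sqe->buf_group = bgid;			/* -> p->bgid */
	sqe->off = bid;				/* first buffer ID */
}
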
4371 const struct io_uring_sqe *sqe) in io_epoll_ctl_prep() argument
4374 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) in io_epoll_ctl_prep()
4379 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4380 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4381 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4386 ev = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_epoll_ctl_prep()
4417 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4420 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in) in io_madvise_prep()
4425 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4426 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4427 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4453 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4455 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in) in io_fadvise_prep()
4460 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4461 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4462 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4489 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4493 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) in io_statx_prep()
4498 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4499 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4500 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4501 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4502 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4524 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4528 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || in io_close_prep()
4529 sqe->rw_flags || sqe->buf_index) in io_close_prep()
4534 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4535 req->close.file_slot = READ_ONCE(sqe->file_index); in io_close_prep()
4593 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
4599 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || in io_sfr_prep()
4600 sqe->splice_fd_in)) in io_sfr_prep()
4603 req->sync.off = READ_ONCE(sqe->off); in io_sfr_prep()
4604 req->sync.len = READ_ONCE(sqe->len); in io_sfr_prep()
4605 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4668 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4674 if (unlikely(sqe->addr2 || sqe->file_index)) in io_sendmsg_prep()
4676 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) in io_sendmsg_prep()
4679 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_sendmsg_prep()
4680 sr->len = READ_ONCE(sqe->len); in io_sendmsg_prep()
4681 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; in io_sendmsg_prep()
4893 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
4899 if (unlikely(sqe->addr2 || sqe->file_index)) in io_recvmsg_prep()
4901 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) in io_recvmsg_prep()
4904 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_recvmsg_prep()
4905 sr->len = READ_ONCE(sqe->len); in io_recvmsg_prep()
4906 sr->bgid = READ_ONCE(sqe->buf_group); in io_recvmsg_prep()
4907 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; in io_recvmsg_prep()
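
Both io_sendmsg_prep() and io_recvmsg_prep() OR MSG_NOSIGNAL into the user-supplied flags, so a send on a torn-down connection completes with -EPIPE in the CQE rather than raising SIGPIPE; recvmsg additionally reads a buffer-group ID from sqe->buf_group for provided-buffer selection. A hedged recvmsg packing sketch:

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void pack_recvmsg_sqe(struct io_uring_sqe *sqe, int sockfd,
			     struct msghdr *msg, unsigned int flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_RECVMSG;
	sqe->fd = sockfd;
	sqe->addr = (uint64_t)(uintptr_t)msg;	/* -> sr->umsg */
	sqe->len = 1;		/* nominal; only consulted for buffer select */
	sqe->msg_flags = flags;	/* MSG_NOSIGNAL is ORed in by the kernel */
}
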
5030 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
5036 if (sqe->ioprio || sqe->len || sqe->buf_index) in io_accept_prep()
5039 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_accept_prep()
5040 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_accept_prep()
5041 accept->flags = READ_ONCE(sqe->accept_flags); in io_accept_prep()
5044 accept->file_slot = READ_ONCE(sqe->file_index); in io_accept_prep()
5102 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
5108 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags || in io_connect_prep()
5109 sqe->splice_fd_in) in io_connect_prep()
5112 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_connect_prep()
5113 conn->addr_len = READ_ONCE(sqe->addr2); in io_connect_prep()
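
Note in io_accept_prep() that sqe->addr2 carries a pointer to the socklen_t, not the length itself, and that a nonzero sqe->file_index turns the request into a direct accept: the new socket is installed into fixed-file slot file_index - 1 instead of consuming a regular fd. A hedged packing sketch (pass file_slot = 0 for a normal fd):

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/io_uring.h>

static void pack_accept_sqe(struct io_uring_sqe *sqe, int listenfd,
			    struct sockaddr *addr, socklen_t *addrlen,
			    int flags, unsigned int file_slot)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ACCEPT;
	sqe->fd = listenfd;
	sqe->addr = (uint64_t)(uintptr_t)addr;		/* -> accept->addr */
	sqe->addr2 = (uint64_t)(uintptr_t)addrlen;	/* -> accept->addr_len */
	sqe->accept_flags = flags;
	sqe->file_index = file_slot;	/* 0 = allocate a normal fd */
}
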
5166 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5746 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe, in io_poll_parse_events() argument
5751 events = READ_ONCE(sqe->poll32_events); in io_poll_parse_events()
5761 const struct io_uring_sqe *sqe) in io_poll_update_prep() argument
5768 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) in io_poll_update_prep()
5770 flags = READ_ONCE(sqe->len); in io_poll_update_prep()
5778 upd->old_user_data = READ_ONCE(sqe->addr); in io_poll_update_prep()
5782 upd->new_user_data = READ_ONCE(sqe->off); in io_poll_update_prep()
5786 upd->events = io_poll_parse_events(sqe, flags); in io_poll_update_prep()
5787 else if (sqe->poll32_events) in io_poll_update_prep()
5793 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5800 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr) in io_poll_add_prep()
5802 flags = READ_ONCE(sqe->len); in io_poll_add_prep()
5807 poll->events = io_poll_parse_events(sqe, flags); in io_poll_add_prep()
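
io_poll_parse_events() reads the full 32-bit mask from sqe->poll32_events, the widened successor of the original 16-bit poll_events field; on big-endian hosts the kernel halfword-swaps the value so the old 16-bit placement still lines up, which means a big-endian writer must pre-swap (liburing's prep helper hides this). A hedged packing sketch for poll add:

#include <string.h>
#include <stdint.h>
#include <poll.h>
#include <linux/io_uring.h>

static void pack_poll_add_sqe(struct io_uring_sqe *sqe, int fd,
			      uint32_t poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* halfword swap, undone by the kernel's swahw32() */
	poll_mask = (poll_mask << 16) | (poll_mask >> 16);
#endif
	sqe->poll32_events = poll_mask;
}
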
5992 const struct io_uring_sqe *sqe) in io_timeout_remove_prep() argument
6000 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in) in io_timeout_remove_prep()
6004 tr->addr = READ_ONCE(sqe->addr); in io_timeout_remove_prep()
6005 tr->flags = READ_ONCE(sqe->timeout_flags); in io_timeout_remove_prep()
6013 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2))) in io_timeout_remove_prep()
6061 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
6066 u32 off = READ_ONCE(sqe->off); in io_timeout_prep()
6070 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || in io_timeout_prep()
6071 sqe->splice_fd_in) in io_timeout_prep()
6075 flags = READ_ONCE(sqe->timeout_flags); in io_timeout_prep()
6094 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) in io_timeout_prep()
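
io_timeout_prep() insists that sqe->len is exactly 1: the request carries one timespec, whose user pointer lives in sqe->addr and is copied in with get_timespec64(), while sqe->off holds the completion count to wait for. A hedged packing sketch (the timespec must stay live until completion):

#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static void pack_timeout_sqe(struct io_uring_sqe *sqe,
			     struct __kernel_timespec *ts,
			     unsigned int count, unsigned int flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->addr = (uint64_t)(uintptr_t)ts;	/* copied via get_timespec64() */
	sqe->len = 1;		/* enforced by io_timeout_prep() */
	sqe->off = count;	/* completions to wait for */
	sqe->timeout_flags = flags;
}
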
6227 const struct io_uring_sqe *sqe) in io_async_cancel_prep() argument
6233 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags || in io_async_cancel_prep()
6234 sqe->splice_fd_in) in io_async_cancel_prep()
6237 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
6271 const struct io_uring_sqe *sqe) in io_rsrc_update_prep() argument
6275 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in) in io_rsrc_update_prep()
6278 req->rsrc_update.offset = READ_ONCE(sqe->off); in io_rsrc_update_prep()
6279 req->rsrc_update.nr_args = READ_ONCE(sqe->len); in io_rsrc_update_prep()
6282 req->rsrc_update.arg = READ_ONCE(sqe->addr); in io_rsrc_update_prep()
6310 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
6318 return io_read_prep(req, sqe); in io_req_prep()
6322 return io_write_prep(req, sqe); in io_req_prep()
6324 return io_poll_add_prep(req, sqe); in io_req_prep()
6326 return io_poll_update_prep(req, sqe); in io_req_prep()
6328 return io_fsync_prep(req, sqe); in io_req_prep()
6330 return io_sfr_prep(req, sqe); in io_req_prep()
6333 return io_sendmsg_prep(req, sqe); in io_req_prep()
6336 return io_recvmsg_prep(req, sqe); in io_req_prep()
6338 return io_connect_prep(req, sqe); in io_req_prep()
6340 return io_timeout_prep(req, sqe, false); in io_req_prep()
6342 return io_timeout_remove_prep(req, sqe); in io_req_prep()
6344 return io_async_cancel_prep(req, sqe); in io_req_prep()
6346 return io_timeout_prep(req, sqe, true); in io_req_prep()
6348 return io_accept_prep(req, sqe); in io_req_prep()
6350 return io_fallocate_prep(req, sqe); in io_req_prep()
6352 return io_openat_prep(req, sqe); in io_req_prep()
6354 return io_close_prep(req, sqe); in io_req_prep()
6356 return io_rsrc_update_prep(req, sqe); in io_req_prep()
6358 return io_statx_prep(req, sqe); in io_req_prep()
6360 return io_fadvise_prep(req, sqe); in io_req_prep()
6362 return io_madvise_prep(req, sqe); in io_req_prep()
6364 return io_openat2_prep(req, sqe); in io_req_prep()
6366 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
6368 return io_splice_prep(req, sqe); in io_req_prep()
6370 return io_provide_buffers_prep(req, sqe); in io_req_prep()
6372 return io_remove_buffers_prep(req, sqe); in io_req_prep()
6374 return io_tee_prep(req, sqe); in io_req_prep()
6376 return io_shutdown_prep(req, sqe); in io_req_prep()
6378 return io_renameat_prep(req, sqe); in io_req_prep()
6380 return io_unlinkat_prep(req, sqe); in io_req_prep()
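
io_req_prep() is a single switch from opcode to prep routine, each routine fully validating its own slice of the SQE before anything is executed. A compact stand-alone sketch of that shape; everything below is a hypothetical stand-in, not the kernel's types:

#include <errno.h>

enum toy_op { TOY_OP_READ, TOY_OP_WRITE, TOY_OP_NOP };

struct toy_req { enum toy_op opcode; };

static int toy_read_prep(struct toy_req *req)  { (void)req; return 0; }
static int toy_write_prep(struct toy_req *req) { (void)req; return 0; }

static int toy_req_prep(struct toy_req *req)
{
	switch (req->opcode) {
	case TOY_OP_READ:
		return toy_read_prep(req);
	case TOY_OP_WRITE:
		return toy_write_prep(req);
	case TOY_OP_NOP:
		return 0;
	}
	return -EINVAL;		/* unknown opcode */
}
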
6970 const struct io_uring_sqe *sqe) in io_init_req() argument
6978 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
6980 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
6981 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
7000 personality = READ_ONCE(sqe->personality); in io_init_req()
7021 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), in io_init_req()
7032 const struct io_uring_sqe *sqe) in io_submit_sqe() argument
7038 ret = io_init_req(ctx, req, sqe); in io_submit_sqe()
7064 ret = io_req_prep(req, sqe); in io_submit_sqe()
7196 const struct io_uring_sqe *sqe; in io_submit_sqes() local
7205 sqe = io_get_sqe(ctx); in io_submit_sqes()
7206 if (unlikely(!sqe)) { in io_submit_sqes()
7212 if (io_submit_sqe(ctx, req, sqe)) in io_submit_sqes()
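
The last three functions form the submission path: io_submit_sqes() pops each SQE off the ring, io_init_req() reads the fixed header fields (opcode, flags, user_data, personality, fd), and io_req_prep() dispatches the same SQE to the per-opcode prep. A toy, runnable skeleton of that loop; all names are illustrative:

#include <stddef.h>
#include <stdio.h>

struct toy_sqe { int opcode; };
struct toy_req { int opcode; };

static const struct toy_sqe *toy_get_sqe(const struct toy_sqe *ring,
					 size_t n, size_t *head)
{
	return *head < n ? &ring[(*head)++] : NULL;	/* NULL: ring drained */
}

static int toy_submit_one(struct toy_req *req, const struct toy_sqe *sqe)
{
	req->opcode = sqe->opcode;	/* header init, as in io_init_req()... */
	return req->opcode < 0;		/* ...then per-opcode prep/validate */
}

int main(void)
{
	const struct toy_sqe ring[] = { {0}, {1}, {2} };
	size_t head = 0, submitted = 0;
	const struct toy_sqe *sqe;
	struct toy_req req;

	while ((sqe = toy_get_sqe(ring, 3, &head)) != NULL) {
		if (toy_submit_one(&req, sqe))
			break;		/* stop on the first bad request */
		submitted++;
	}
	printf("submitted %zu of 3\n", submitted);
	return 0;
}
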