Lines matching full:req (io_uring request handling)
518 struct io_kiocb *req; member
814 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
909 struct io_kiocb *req; member
914 /* needs req->file assigned */
1083 static bool io_disarm_next(struct io_kiocb *req);
1090 static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1092 static void io_put_req(struct io_kiocb *req);
1093 static void io_put_req_deferred(struct io_kiocb *req);
1094 static void io_dismantle_req(struct io_kiocb *req);
1095 static void io_queue_linked_timeout(struct io_kiocb *req);
1099 static void io_clean_op(struct io_kiocb *req);
1101 struct io_kiocb *req, int fd, bool fixed);
1102 static void __io_queue_sqe(struct io_kiocb *req);
1105 static void io_req_task_queue(struct io_kiocb *req);
1107 static int io_req_prep_async(struct io_kiocb *req);
1109 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1111 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1147 #define req_ref_zero_or_close_to_overflow(req) \ argument
1148 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1150 static inline bool req_ref_inc_not_zero(struct io_kiocb *req) in req_ref_inc_not_zero() argument
1152 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1153 return atomic_inc_not_zero(&req->refs); in req_ref_inc_not_zero()
1156 static inline bool req_ref_put_and_test(struct io_kiocb *req) in req_ref_put_and_test() argument
1158 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1161 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_put_and_test()
1162 return atomic_dec_and_test(&req->refs); in req_ref_put_and_test()
1165 static inline void req_ref_get(struct io_kiocb *req) in req_ref_get() argument
1167 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1168 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_get()
1169 atomic_inc(&req->refs); in req_ref_get()
1172 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) in __io_req_set_refcount() argument
1174 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1175 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
1176 atomic_set(&req->refs, nr); in __io_req_set_refcount()
1180 static inline void io_req_set_refcount(struct io_kiocb *req) in io_req_set_refcount() argument
1182 __io_req_set_refcount(req, 1); in io_req_set_refcount()
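The req_ref_*() matches above (lines 1147-1182) are io_uring's opt-in request refcounting: the atomic count only means anything once REQ_F_REFCOUNT has been set, and the macro at line 1147 uses unsigned wraparound to flag counts in the range [-127, 0], i.e. requests that were already released or whose count underflowed. A minimal userspace sketch of the same pattern, using hypothetical demo_* names and C11 atomics rather than the kernel's helpers:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_F_REFCOUNT	(1u << 0)	/* hypothetical "refs are live" flag */

struct demo_req {
	unsigned int flags;
	atomic_int refs;
};

/* Same trick as the line-1147 macro: true for counts in [-127, 0]. */
static inline bool demo_ref_suspicious(struct demo_req *r)
{
	return (unsigned int)atomic_load(&r->refs) + 127u <= 127u;
}

/* Opt the request into refcounting with an initial count. */
static inline void demo_ref_set(struct demo_req *r, int nr)
{
	if (!(r->flags & DEMO_F_REFCOUNT)) {
		r->flags |= DEMO_F_REFCOUNT;
		atomic_store(&r->refs, nr);
	}
}

/* Drop a reference; returns true when the caller should free the request. */
static inline bool demo_ref_put_and_test(struct demo_req *r)
{
	if (!(r->flags & DEMO_F_REFCOUNT))
		return true;			/* untracked: the single owner is done */
	assert(!demo_ref_suspicious(r));
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

The point of the opt-in flag is that most requests finish with a single owner and never pay for atomic operations; only features that create a second owner, such as linked timeouts or polling, switch the count on.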
1185 static inline void io_req_set_rsrc_node(struct io_kiocb *req) in io_req_set_rsrc_node() argument
1187 struct io_ring_ctx *ctx = req->ctx; in io_req_set_rsrc_node()
1189 if (!req->fixed_rsrc_refs) { in io_req_set_rsrc_node()
1190 req->fixed_rsrc_refs = &ctx->rsrc_node->refs; in io_req_set_rsrc_node()
1191 percpu_ref_get(req->fixed_rsrc_refs); in io_req_set_rsrc_node()
1209 __must_hold(&req->ctx->timeout_lock) in io_match_task()
1211 struct io_kiocb *req; in io_match_task() local
1218 io_for_each_link(req, head) { in io_match_task()
1219 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1227 struct io_kiocb *req; in io_match_linked() local
1229 io_for_each_link(req, head) { in io_match_linked()
1230 if (req->flags & REQ_F_INFLIGHT) in io_match_linked()
1263 static inline void req_set_fail(struct io_kiocb *req) in req_set_fail() argument
1265 req->flags |= REQ_F_FAIL; in req_set_fail()
1268 static inline void req_fail_link_node(struct io_kiocb *req, int res) in req_fail_link_node() argument
1270 req_set_fail(req); in req_fail_link_node()
1271 req->result = res; in req_fail_link_node()
1281 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1283 return !req->timeout.off; in io_is_timeout_noseq()
1291 struct io_kiocb *req, *tmp; in io_fallback_req_func() local
1295 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) in io_fallback_req_func()
1296 req->io_task_work.func(req, &locked); in io_fallback_req_func()
1381 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1383 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1384 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1401 static inline bool io_req_ffs_set(struct io_kiocb *req) in io_req_ffs_set() argument
1403 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1406 static void io_req_track_inflight(struct io_kiocb *req) in io_req_track_inflight() argument
1408 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1409 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1410 atomic_inc(&req->task->io_uring->inflight_tracked); in io_req_track_inflight()
1414 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) in __io_prep_linked_timeout() argument
1416 if (WARN_ON_ONCE(!req->link)) in __io_prep_linked_timeout()
1419 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1420 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1423 io_req_set_refcount(req); in __io_prep_linked_timeout()
1424 __io_req_set_refcount(req->link, 2); in __io_prep_linked_timeout()
1425 return req->link; in __io_prep_linked_timeout()
1428 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
1430 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1432 return __io_prep_linked_timeout(req); in io_prep_linked_timeout()
1435 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1437 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1438 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1440 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1441 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1442 req->creds = get_current_cred(); in io_prep_async_work()
1445 req->work.list.next = NULL; in io_prep_async_work()
1446 req->work.flags = 0; in io_prep_async_work()
1447 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1448 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1450 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1452 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1453 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { in io_prep_async_work()
1455 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1459 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1463 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1464 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_link()
1467 io_for_each_link(cur, req) in io_prep_async_link()
1471 io_for_each_link(cur, req) in io_prep_async_link()
1476 static void io_queue_async_work(struct io_kiocb *req, bool *locked) in io_queue_async_work() argument
1478 struct io_ring_ctx *ctx = req->ctx; in io_queue_async_work()
1479 struct io_kiocb *link = io_prep_linked_timeout(req); in io_queue_async_work()
1480 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work()
1489 io_prep_async_link(req); in io_queue_async_work()
1498 if (WARN_ON_ONCE(!same_thread_group(req->task, current))) in io_queue_async_work()
1499 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1501 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in io_queue_async_work()
1502 &req->work, req->flags); in io_queue_async_work()
1503 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1508 static void io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout() argument
1509 __must_hold(&req->ctx->completion_lock) in io_kill_timeout()
1510 __must_hold(&req->ctx->timeout_lock) in io_kill_timeout()
1512 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1516 req_set_fail(req); in io_kill_timeout()
1517 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1518 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1519 list_del_init(&req->timeout.list); in io_kill_timeout()
1520 io_fill_cqe_req(req, status, 0); in io_kill_timeout()
1521 io_put_req_deferred(req); in io_kill_timeout()
1531 if (req_need_defer(de->req, de->seq)) in io_queue_deferred()
1534 io_req_task_queue(de->req); in io_queue_deferred()
1543 struct io_kiocb *req, *tmp; in io_flush_timeouts() local
1546 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_flush_timeouts()
1549 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1559 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; in io_flush_timeouts()
1564 io_kill_timeout(req, 0); in io_flush_timeouts()
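io_flush_timeouts() above decides whether a timeout's target CQE sequence has been reached. Both the target and the current sequence are taken relative to cq_last_tm_flush before comparing (only the first half of that comparison mentions `req`, so only it appears above), which keeps the test correct when the 32-bit counters wrap. A small standalone illustration of why the relative form works, with hypothetical names rather than the kernel code:

#include <assert.h>
#include <stdint.h>

/*
 * Has a timeout whose target sequence is 'target' been reached, given the
 * current sequence 'seq' and the sequence at the last flush 'last_flush'?
 * Subtracting 'last_flush' first keeps the test correct across u32 wrap.
 */
static int timeout_reached(uint32_t target, uint32_t seq, uint32_t last_flush)
{
	uint32_t events_needed = target - last_flush;
	uint32_t events_got    = seq - last_flush;

	return events_got >= events_needed;
}

int main(void)
{
	/* no wrap: target 110 is reached once seq hits 110 */
	assert(!timeout_reached(110, 105, 100));
	assert(timeout_reached(110, 112, 100));

	/* wrap: both target and seq sit past the 32-bit boundary */
	assert(!timeout_reached(5, 2, 0xfffffff0u));
	assert(timeout_reached(5, 7, 0xfffffff0u));
	return 0;
}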
1821 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) in io_fill_cqe_req() argument
1823 __io_fill_cqe(req->ctx, req->user_data, res, cflags); in io_fill_cqe_req()
1833 static void io_req_complete_post(struct io_kiocb *req, s32 res, in io_req_complete_post() argument
1836 struct io_ring_ctx *ctx = req->ctx; in io_req_complete_post()
1839 __io_fill_cqe(ctx, req->user_data, res, cflags); in io_req_complete_post()
1844 if (req_ref_put_and_test(req)) { in io_req_complete_post()
1845 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1846 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1847 io_disarm_next(req); in io_req_complete_post()
1848 if (req->link) { in io_req_complete_post()
1849 io_req_task_queue(req->link); in io_req_complete_post()
1850 req->link = NULL; in io_req_complete_post()
1853 io_dismantle_req(req); in io_req_complete_post()
1854 io_put_task(req->task, 1); in io_req_complete_post()
1855 list_add(&req->inflight_entry, &ctx->locked_free_list); in io_req_complete_post()
1859 req = NULL; in io_req_complete_post()
1864 if (req) { in io_req_complete_post()
1870 static inline bool io_req_needs_clean(struct io_kiocb *req) in io_req_needs_clean() argument
1872 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1875 static inline void io_req_complete_state(struct io_kiocb *req, s32 res, in io_req_complete_state() argument
1878 if (io_req_needs_clean(req)) in io_req_complete_state()
1879 io_clean_op(req); in io_req_complete_state()
1880 req->result = res; in io_req_complete_state()
1881 req->compl.cflags = cflags; in io_req_complete_state()
1882 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1885 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, in __io_req_complete() argument
1889 io_req_complete_state(req, res, cflags); in __io_req_complete()
1891 io_req_complete_post(req, res, cflags); in __io_req_complete()
1894 static inline void io_req_complete(struct io_kiocb *req, s32 res) in io_req_complete() argument
1896 __io_req_complete(req, 0, res, 0); in io_req_complete()
1899 static void io_req_complete_failed(struct io_kiocb *req, s32 res) in io_req_complete_failed() argument
1901 req_set_fail(req); in io_req_complete_failed()
1902 io_req_complete_post(req, res, 0); in io_req_complete_failed()
1905 static void io_req_complete_fail_submit(struct io_kiocb *req) in io_req_complete_fail_submit() argument
1911 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1912 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
1913 io_req_complete_failed(req, req->result); in io_req_complete_fail_submit()
1920 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_preinit_req() argument
1922 req->ctx = ctx; in io_preinit_req()
1923 req->link = NULL; in io_preinit_req()
1924 req->async_data = NULL; in io_preinit_req()
1926 req->result = 0; in io_preinit_req()
1954 struct io_kiocb *req = list_first_entry(&state->free_list, in io_flush_cached_reqs() local
1957 list_del(&req->inflight_entry); in io_flush_cached_reqs()
1958 state->reqs[nr++] = req; in io_flush_cached_reqs()
2013 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
2015 unsigned int flags = req->flags; in io_dismantle_req()
2017 if (io_req_needs_clean(req)) in io_dismantle_req()
2018 io_clean_op(req); in io_dismantle_req()
2020 io_put_file(req->file); in io_dismantle_req()
2021 if (req->fixed_rsrc_refs) in io_dismantle_req()
2022 percpu_ref_put(req->fixed_rsrc_refs); in io_dismantle_req()
2023 if (req->async_data) { in io_dismantle_req()
2024 kfree(req->async_data); in io_dismantle_req()
2025 req->async_data = NULL; in io_dismantle_req()
2029 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
2031 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
2033 io_dismantle_req(req); in __io_free_req()
2034 io_put_task(req->task, 1); in __io_free_req()
2037 list_add(&req->inflight_entry, &ctx->locked_free_list); in __io_free_req()
2044 static inline void io_remove_next_linked(struct io_kiocb *req) in io_remove_next_linked() argument
2046 struct io_kiocb *nxt = req->link; in io_remove_next_linked()
2048 req->link = nxt->link; in io_remove_next_linked()
2052 static bool io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
2053 __must_hold(&req->ctx->completion_lock) in io_kill_linked_timeout()
2054 __must_hold(&req->ctx->timeout_lock) in io_kill_linked_timeout()
2056 struct io_kiocb *link = req->link; in io_kill_linked_timeout()
2061 io_remove_next_linked(req); in io_kill_linked_timeout()
2073 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
2074 __must_hold(&req->ctx->completion_lock) in io_fail_links()
2076 struct io_kiocb *nxt, *link = req->link; in io_fail_links()
2078 req->link = NULL; in io_fail_links()
2088 trace_io_uring_fail_link(req, link); in io_fail_links()
2095 static bool io_disarm_next(struct io_kiocb *req) in io_disarm_next() argument
2096 __must_hold(&req->ctx->completion_lock) in io_disarm_next()
2100 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2101 struct io_kiocb *link = req->link; in io_disarm_next()
2103 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2105 io_remove_next_linked(req); in io_disarm_next()
2110 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2111 struct io_ring_ctx *ctx = req->ctx; in io_disarm_next()
2114 posted = io_kill_linked_timeout(req); in io_disarm_next()
2117 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2118 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2119 posted |= (req->link != NULL); in io_disarm_next()
2120 io_fail_links(req); in io_disarm_next()
2125 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
2135 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2136 struct io_ring_ctx *ctx = req->ctx; in __io_req_find_next()
2140 posted = io_disarm_next(req); in __io_req_find_next()
2142 io_commit_cqring(req->ctx); in __io_req_find_next()
2147 nxt = req->link; in __io_req_find_next()
2148 req->link = NULL; in __io_req_find_next()
2152 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
2154 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2156 return __io_req_find_next(req); in io_req_find_next()
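io_req_find_next() and __io_req_find_next() above are the linked-request hand-off: the common case (no REQ_F_LINK/REQ_F_HARDLINK) returns immediately, otherwise any armed timeout is disarmed and the next request is detached from the chain so it can be queued. A stripped-down sketch of the detach step only, with hypothetical demo types, not the kernel code:

#include <stddef.h>

#define DEMO_F_LINK	(1u << 0)	/* hypothetical link flags */
#define DEMO_F_HARDLINK	(1u << 1)

struct demo_req {
	unsigned int flags;
	struct demo_req *link;		/* next request in the submission chain */
};

/* Return the next linked request, detaching it, or NULL if there is none. */
static struct demo_req *demo_find_next(struct demo_req *req)
{
	struct demo_req *nxt;

	if (!(req->flags & (DEMO_F_LINK | DEMO_F_HARDLINK)))
		return NULL;		/* common case: request is not part of a chain */

	nxt = req->link;
	req->link = NULL;		/* break the chain before handing the next one off */
	return nxt;
}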
2196 struct io_kiocb *req = container_of(node, struct io_kiocb, in tctx_task_work() local
2199 if (req->ctx != ctx) { in tctx_task_work()
2201 ctx = req->ctx; in tctx_task_work()
2206 req->io_task_work.func(req, &locked); in tctx_task_work()
2220 static void io_req_task_work_add(struct io_kiocb *req) in io_req_task_work_add() argument
2222 struct task_struct *tsk = req->task; in io_req_task_work_add()
2232 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2248 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2261 req = container_of(node, struct io_kiocb, io_task_work.node); in io_req_task_work_add()
2263 if (llist_add(&req->io_task_work.fallback_node, in io_req_task_work_add()
2264 &req->ctx->fallback_llist)) in io_req_task_work_add()
2265 schedule_delayed_work(&req->ctx->fallback_work, 1); in io_req_task_work_add()
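io_req_task_work_add() above queues the request's callback on the submitting task and, if that task can no longer run task work, pushes the request onto a lock-free list that io_fallback_req_func() (lines 1291-1296) later drains from a workqueue. The fallback push is the classic atomic prepend to a singly linked list; a userspace sketch with C11 atomics, using hypothetical names rather than the kernel's llist API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_node {
	struct demo_node *next;
};

struct demo_llist {
	_Atomic(struct demo_node *) first;
};

/*
 * Lock-free prepend.  Returns true when the list was empty before the add,
 * which is the caller's cue to schedule the drain work.
 */
static bool demo_llist_add(struct demo_llist *list, struct demo_node *node)
{
	struct demo_node *first = atomic_load(&list->first);

	do {
		node->next = first;
	} while (!atomic_compare_exchange_weak(&list->first, &first, node));

	return first == NULL;
}

Returning whether the list was previously empty lets exactly one pusher schedule the drain, which is what the `if (llist_add(...)) schedule_delayed_work(...)` pair in the listing relies on.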
2269 static void io_req_task_cancel(struct io_kiocb *req, bool *locked) in io_req_task_cancel() argument
2271 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2275 io_req_complete_failed(req, req->result); in io_req_task_cancel()
2278 static void io_req_task_submit(struct io_kiocb *req, bool *locked) in io_req_task_submit() argument
2280 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2283 /* req->task == current here, checking PF_EXITING is safe */ in io_req_task_submit()
2284 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2285 __io_queue_sqe(req); in io_req_task_submit()
2287 io_req_complete_failed(req, -EFAULT); in io_req_task_submit()
2290 static void io_req_task_queue_fail(struct io_kiocb *req, int ret) in io_req_task_queue_fail() argument
2292 req->result = ret; in io_req_task_queue_fail()
2293 req->io_task_work.func = io_req_task_cancel; in io_req_task_queue_fail()
2294 io_req_task_work_add(req); in io_req_task_queue_fail()
2297 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2299 req->io_task_work.func = io_req_task_submit; in io_req_task_queue()
2300 io_req_task_work_add(req); in io_req_task_queue()
2303 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue() argument
2305 req->io_task_work.func = io_queue_async_work; in io_req_task_queue_reissue()
2306 io_req_task_work_add(req); in io_req_task_queue_reissue()
2309 static inline void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2311 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2317 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2319 io_queue_next(req); in io_free_req()
2320 __io_free_req(req); in io_free_req()
2323 static void io_free_req_work(struct io_kiocb *req, bool *locked) in io_free_req_work() argument
2325 io_free_req(req); in io_free_req_work()
2350 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, in io_req_free_batch() argument
2353 io_queue_next(req); in io_req_free_batch()
2354 io_dismantle_req(req); in io_req_free_batch()
2356 if (req->task != rb->task) { in io_req_free_batch()
2359 rb->task = req->task; in io_req_free_batch()
2366 state->reqs[state->free_reqs++] = req; in io_req_free_batch()
2368 list_add(&req->inflight_entry, &state->free_list); in io_req_free_batch()
2380 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2382 __io_fill_cqe(ctx, req->user_data, req->result, in io_submit_flush_completions()
2383 req->compl.cflags); in io_submit_flush_completions()
2391 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2393 if (req_ref_put_and_test(req)) in io_submit_flush_completions()
2394 io_req_free_batch(&rb, req, &ctx->submit_state); in io_submit_flush_completions()
2405 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2409 if (req_ref_put_and_test(req)) { in io_put_req_find_next()
2410 nxt = io_req_find_next(req); in io_put_req_find_next()
2411 __io_free_req(req); in io_put_req_find_next()
2416 static inline void io_put_req(struct io_kiocb *req) in io_put_req() argument
2418 if (req_ref_put_and_test(req)) in io_put_req()
2419 io_free_req(req); in io_put_req()
2422 static inline void io_put_req_deferred(struct io_kiocb *req) in io_put_req_deferred() argument
2424 if (req_ref_put_and_test(req)) { in io_put_req_deferred()
2425 req->io_task_work.func = io_free_req_work; in io_put_req_deferred()
2426 io_req_task_work_add(req); in io_put_req_deferred()
2445 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2451 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2456 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2460 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2462 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2463 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
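io_put_rw_kbuf()/io_put_kbuf() above hand a selected provided buffer back by encoding which buffer was consumed into the completion flags, and clear REQ_F_BUFFER_SELECTED. A sketch of that encode/decode idea with hypothetical shift and flag values (not necessarily the real ABI constants):

#include <stdint.h>

#define DEMO_CQE_F_BUFFER	(1u << 0)	/* hypothetical: "cflags carry a buffer ID" */
#define DEMO_CQE_BUFFER_SHIFT	16		/* hypothetical position of the ID bits */

/* Pack the ID of the consumed provided buffer into completion flags. */
static uint32_t demo_encode_buf(uint16_t bid)
{
	return ((uint32_t)bid << DEMO_CQE_BUFFER_SHIFT) | DEMO_CQE_F_BUFFER;
}

/* Recover the buffer ID on the completion side; returns 0 if none attached. */
static int demo_decode_buf(uint32_t cflags, uint16_t *bid)
{
	if (!(cflags & DEMO_CQE_F_BUFFER))
		return 0;
	*bid = (uint16_t)(cflags >> DEMO_CQE_BUFFER_SHIFT);
	return 1;
}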
2484 struct io_kiocb *req; in io_iopoll_complete() local
2491 req = list_first_entry(done, struct io_kiocb, inflight_entry); in io_iopoll_complete()
2492 list_del(&req->inflight_entry); in io_iopoll_complete()
2494 io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req)); in io_iopoll_complete()
2497 if (req_ref_put_and_test(req)) in io_iopoll_complete()
2498 io_req_free_batch(&rb, req, &ctx->submit_state); in io_iopoll_complete()
2509 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2519 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { in io_do_iopoll()
2520 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2528 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2529 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2541 /* iopoll may have completed current req */ in io_do_iopoll()
2542 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2543 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2634 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2640 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2641 struct super_block *sb = file_inode(req->file)->i_sb; in kiocb_end_write()
2649 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2651 struct io_async_rw *rw = req->async_data; in io_resubmit_prep()
2654 return !io_req_prep_async(req); in io_resubmit_prep()
2659 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2661 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
2662 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
2666 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2680 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
2685 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2689 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2695 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
2697 if (req->rw.kiocb.ki_flags & IOCB_WRITE) { in __io_complete_rw_common()
2698 kiocb_end_write(req); in __io_complete_rw_common()
2699 fsnotify_modify(req->file); in __io_complete_rw_common()
2701 fsnotify_access(req->file); in __io_complete_rw_common()
2703 if (res != req->result) { in __io_complete_rw_common()
2705 io_rw_should_reissue(req)) { in __io_complete_rw_common()
2706 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2709 req_set_fail(req); in __io_complete_rw_common()
2710 req->result = res; in __io_complete_rw_common()
2715 static inline int io_fixup_rw_res(struct io_kiocb *req, unsigned res) in io_fixup_rw_res() argument
2717 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
2729 static void io_req_task_complete(struct io_kiocb *req, bool *locked) in io_req_task_complete() argument
2731 unsigned int cflags = io_put_rw_kbuf(req); in io_req_task_complete()
2732 int res = req->result; in io_req_task_complete()
2735 struct io_ring_ctx *ctx = req->ctx; in io_req_task_complete()
2738 io_req_complete_state(req, res, cflags); in io_req_task_complete()
2739 state->compl_reqs[state->compl_nr++] = req; in io_req_task_complete()
2743 io_req_complete_post(req, res, cflags); in io_req_task_complete()
2747 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, in __io_complete_rw() argument
2750 if (__io_complete_rw_common(req, res)) in __io_complete_rw()
2752 __io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req)); in __io_complete_rw()
2757 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2759 if (__io_complete_rw_common(req, res)) in io_complete_rw()
2761 req->result = io_fixup_rw_res(req, res); in io_complete_rw()
2762 req->io_task_work.func = io_req_task_complete; in io_complete_rw()
2763 io_req_task_work_add(req); in io_complete_rw()
2768 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2771 kiocb_end_write(req); in io_complete_rw_iopoll()
2772 if (unlikely(res != req->result)) { in io_complete_rw_iopoll()
2773 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
2774 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2779 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2782 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
2791 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2793 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2814 if (list_req->file != req->file) { in io_iopoll_req_issued()
2818 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); in io_iopoll_req_issued()
2828 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2829 list_add(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2831 list_add_tail(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2891 static bool io_file_supports_nowait(struct io_kiocb *req, int rw) in io_file_supports_nowait() argument
2893 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2895 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2898 return __io_file_supports_nowait(req->file, rw); in io_file_supports_nowait()
2901 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
2904 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2905 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2906 struct file *file = req->file; in io_prep_rw()
2910 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) in io_prep_rw()
2911 req->flags |= REQ_F_ISREG; in io_prep_rw()
2916 req->flags |= REQ_F_CUR_POS; in io_prep_rw()
2934 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) in io_prep_rw()
2935 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2954 req->iopoll_completed = 0; in io_prep_rw()
2962 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2963 req->imu = NULL; in io_prep_rw()
2965 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
2966 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
2967 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2970 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw()
2972 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw()
2973 req->imu = ctx->user_bufs[index]; in io_prep_rw()
2974 io_req_set_rsrc_node(req); in io_prep_rw()
2977 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2978 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
3006 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
3008 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
3009 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
3011 __io_complete_rw(req, ret, 0, issue_flags); in kiocb_done()
3015 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
3016 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
3017 if (io_resubmit_prep(req)) { in kiocb_done()
3018 io_req_task_queue_reissue(req); in kiocb_done()
3020 unsigned int cflags = io_put_rw_kbuf(req); in kiocb_done()
3021 struct io_ring_ctx *ctx = req->ctx; in kiocb_done()
3023 ret = io_fixup_rw_res(req, ret); in kiocb_done()
3024 req_set_fail(req); in kiocb_done()
3027 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3030 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3036 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, in __io_import_fixed() argument
3039 size_t len = req->rw.len; in __io_import_fixed()
3040 u64 buf_end, buf_addr = req->rw.addr; in __io_import_fixed()
3094 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) in io_import_fixed() argument
3096 if (WARN_ON_ONCE(!req->imu)) in io_import_fixed()
3098 return __io_import_fixed(req, rw, iter, req->imu); in io_import_fixed()
3119 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
3125 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3128 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
3130 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
3132 head = xa_load(&req->ctx->io_buffers, bgid); in io_buffer_select()
3140 xa_erase(&req->ctx->io_buffers, bgid); in io_buffer_select()
3148 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
3153 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
3159 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
3160 bgid = req->buf_index; in io_rw_buffer_select()
3161 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
3164 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
3165 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3170 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3178 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3187 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3196 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3199 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3209 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3217 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3220 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3223 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3228 if (req->rw.len != 1) in io_iov_buffer_select()
3232 if (req->ctx->compat) in io_iov_buffer_select()
3233 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3236 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
3239 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, in io_import_iovec() argument
3242 void __user *buf = u64_to_user_ptr(req->rw.addr); in io_import_iovec()
3243 size_t sqe_len = req->rw.len; in io_import_iovec()
3244 u8 opcode = req->opcode; in io_import_iovec()
3249 return io_import_fixed(req, rw, iter); in io_import_iovec()
3253 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3257 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3258 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in io_import_iovec()
3261 req->rw.len = sqe_len; in io_import_iovec()
3269 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3270 ret = io_iov_buffer_select(req, *iovec, needs_lock); in io_import_iovec()
3278 req->ctx->compat); in io_import_iovec()
3290 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3292 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3293 struct file *file = req->file; in loop_rw_iter()
3313 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3314 iovec.iov_len = req->rw.len; in loop_rw_iter()
3334 req->rw.addr += nr; in loop_rw_iter()
3335 req->rw.len -= nr; in loop_rw_iter()
3336 if (!req->rw.len) in loop_rw_iter()
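loop_rw_iter() above is the fallback for files that implement only ->read/->write rather than the iterator variants: it issues one call per iovec segment, advances rw.addr/rw.len as bytes complete, and stops on a short transfer. A userspace analogue that emulates readv(2) with repeated read(2) calls, a sketch rather than the kernel code:

#include <stddef.h>
#include <sys/uio.h>
#include <unistd.h>

/* Issue one read(2) per iovec segment; stop on error, EOF or a short read. */
static ssize_t demo_loop_read(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t total = 0;

	for (int i = 0; i < iovcnt; i++) {
		ssize_t nr = read(fd, iov[i].iov_base, iov[i].iov_len);

		if (nr < 0)
			return total ? total : nr;	/* report progress if any */
		total += nr;
		if ((size_t)nr < iov[i].iov_len)
			break;				/* short read or EOF: stop here */
	}
	return total;
}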
3346 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3349 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3369 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3373 static inline int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3375 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in io_alloc_async_data()
3376 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in io_alloc_async_data()
3377 return req->async_data == NULL; in io_alloc_async_data()
3380 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3384 if (!force && !io_op_defs[req->opcode].needs_async_setup) in io_setup_async_rw()
3386 if (!req->async_data) { in io_setup_async_rw()
3389 if (io_alloc_async_data(req)) { in io_setup_async_rw()
3394 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3395 iorw = req->async_data; in io_setup_async_rw()
3402 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3404 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3408 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3415 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3420 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3422 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3424 return io_prep_rw(req, sqe, READ); in io_read_prep()
3441 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3449 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3451 io_req_task_queue(req); in io_async_buf_func()
3467 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3469 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3471 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3474 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3485 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3489 wait->wait.private = req; in io_rw_should_retry()
3498 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3500 if (req->file->f_op->read_iter) in io_iter_do_read()
3501 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3502 else if (req->file->f_op->read) in io_iter_do_read()
3503 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
3508 static bool need_read_all(struct io_kiocb *req) in need_read_all() argument
3510 return req->flags & REQ_F_ISREG || in need_read_all()
3511 S_ISBLK(file_inode(req->file)->i_mode); in need_read_all()
3514 static int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
3517 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3519 struct io_async_rw *rw = req->async_data; in io_read()
3535 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3541 req->result = iov_iter_count(iter); in io_read()
3550 if (force_nonblock && !io_file_supports_nowait(req, READ)) { in io_read()
3551 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3555 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); in io_read()
3561 ret = io_iter_do_read(req, iter); in io_read()
3563 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3564 req->flags &= ~REQ_F_REISSUE; in io_read()
3566 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3569 if (req->flags & REQ_F_NOWAIT) in io_read()
3574 } else if (ret <= 0 || ret == req->result || !force_nonblock || in io_read()
3575 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3587 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3592 rw = req->async_data; in io_read()
3615 if (!io_rw_should_retry(req)) { in io_read()
3620 req->result = iov_iter_count(iter); in io_read()
3627 ret = io_iter_do_read(req, iter); in io_read()
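The io_read() matches above include the short-read handling: for regular and block files (need_read_all()), a partial result is not surfaced as-is; the remaining range is captured in the async iterator state and the read is retried. A simple userspace analogue of that retry-until-done behaviour, a sketch rather than the kernel code:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Keep issuing read(2) until 'len' bytes arrive, EOF, or a real error. */
static ssize_t demo_read_full(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t nr = read(fd, (char *)buf + done, len - done);

		if (nr < 0) {
			if (errno == EINTR)
				continue;		/* transient: retry the remainder */
			return done ? (ssize_t)done : -1;
		}
		if (nr == 0)
			break;				/* EOF: return what we have */
		done += nr;
	}
	return (ssize_t)done;
}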
3643 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3645 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3647 return io_prep_rw(req, sqe, WRITE); in io_write_prep()
3650 static int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
3653 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3655 struct io_async_rw *rw = req->async_data; in io_write()
3666 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3672 req->result = iov_iter_count(iter); in io_write()
3681 if (force_nonblock && !io_file_supports_nowait(req, WRITE)) in io_write()
3686 (req->flags & REQ_F_ISREG)) in io_write()
3689 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); in io_write()
3700 if (req->flags & REQ_F_ISREG) { in io_write()
3701 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3702 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3707 if (req->file->f_op->write_iter) in io_write()
3708 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3709 else if (req->file->f_op->write) in io_write()
3710 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3714 if (req->flags & REQ_F_REISSUE) { in io_write()
3715 req->flags &= ~REQ_F_REISSUE; in io_write()
3726 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3730 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3737 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
3740 kiocb_end_write(req); in io_write()
3752 static int io_renameat_prep(struct io_kiocb *req, in io_renameat_prep() argument
3755 struct io_rename *ren = &req->rename; in io_renameat_prep()
3758 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3762 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3781 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3785 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
3787 struct io_rename *ren = &req->rename; in io_renameat()
3796 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3798 req_set_fail(req); in io_renameat()
3799 io_req_complete(req, ret); in io_renameat()
3803 static int io_unlinkat_prep(struct io_kiocb *req, in io_unlinkat_prep() argument
3806 struct io_unlink *un = &req->unlink; in io_unlinkat_prep()
3809 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3814 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3828 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3832 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
3834 struct io_unlink *un = &req->unlink; in io_unlinkat()
3845 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3847 req_set_fail(req); in io_unlinkat()
3848 io_req_complete(req, ret); in io_unlinkat()
3852 static int io_shutdown_prep(struct io_kiocb *req, in io_shutdown_prep() argument
3856 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
3862 req->shutdown.how = READ_ONCE(sqe->len); in io_shutdown_prep()
3869 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
3878 sock = sock_from_file(req->file, &ret); in io_shutdown()
3882 ret = __sys_shutdown_sock(sock, req->shutdown.how); in io_shutdown()
3884 req_set_fail(req); in io_shutdown()
3885 io_req_complete(req, ret); in io_shutdown()
3892 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3895 struct io_splice *sp = &req->splice; in __io_splice_prep()
3898 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3909 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3914 return __io_splice_prep(req, sqe); in io_tee_prep()
3917 static int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
3919 struct io_splice *sp = &req->splice; in io_tee()
3928 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_tee()
3942 req_set_fail(req); in io_tee()
3943 io_req_complete(req, ret); in io_tee()
3947 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
3949 struct io_splice *sp = &req->splice; in io_splice_prep()
3953 return __io_splice_prep(req, sqe); in io_splice_prep()
3956 static int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
3958 struct io_splice *sp = &req->splice; in io_splice()
3968 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_splice()
3985 req_set_fail(req); in io_splice()
3986 io_req_complete(req, ret); in io_splice()
3993 static int io_nop(struct io_kiocb *req, unsigned int issue_flags) in io_nop() argument
3995 struct io_ring_ctx *ctx = req->ctx; in io_nop()
4000 __io_req_complete(req, issue_flags, 0, 0); in io_nop()
4004 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
4006 struct io_ring_ctx *ctx = req->ctx; in io_fsync_prep()
4014 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4015 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4018 req->sync.off = READ_ONCE(sqe->off); in io_fsync_prep()
4019 req->sync.len = READ_ONCE(sqe->len); in io_fsync_prep()
4023 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
4025 loff_t end = req->sync.off + req->sync.len; in io_fsync()
4032 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
4034 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4036 req_set_fail(req); in io_fsync()
4037 io_req_complete(req, ret); in io_fsync()
4041 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
4047 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4050 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
4051 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
4052 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
4056 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
4063 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
4064 req->sync.len); in io_fallocate()
4066 req_set_fail(req); in io_fallocate()
4068 fsnotify_modify(req->file); in io_fallocate()
4069 io_req_complete(req, ret); in io_fallocate()
4073 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
4078 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4082 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4086 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4087 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4089 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
4091 req->open.filename = getname(fname); in __io_openat_prep()
4092 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
4093 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
4094 req->open.filename = NULL; in __io_openat_prep()
4098 req->open.file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
4099 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4102 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
4103 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4107 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
4112 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4113 return __io_openat_prep(req, sqe); in io_openat_prep()
4116 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
4127 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
4132 return __io_openat_prep(req, sqe); in io_openat2_prep()
4135 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
4140 bool fixed = !!req->open.file_slot; in io_openat2()
4143 ret = build_open_flags(&req->open.how, &op); in io_openat2()
4147 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; in io_openat2()
4153 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) in io_openat2()
4160 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4165 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
4190 ret = io_install_fixed_file(req, file, issue_flags, in io_openat2()
4191 req->open.file_slot - 1); in io_openat2()
4193 putname(req->open.filename); in io_openat2()
4194 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4196 req_set_fail(req); in io_openat2()
4197 __io_req_complete(req, issue_flags, ret, 0); in io_openat2()
4201 static int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
4203 return io_openat2(req, issue_flags); in io_openat()
4206 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
4209 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
4253 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_remove_buffers() argument
4255 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
4256 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
4270 req_set_fail(req); in io_remove_buffers()
4273 __io_req_complete(req, issue_flags, ret, 0); in io_remove_buffers()
4278 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
4282 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4341 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_provide_buffers() argument
4343 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4344 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4363 req_set_fail(req); in io_provide_buffers()
4365 __io_req_complete(req, issue_flags, ret, 0); in io_provide_buffers()
4370 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4376 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4379 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4380 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4381 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4383 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4387 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4397 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) in io_epoll_ctl() argument
4400 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4409 req_set_fail(req); in io_epoll_ctl()
4410 __io_req_complete(req, issue_flags, ret, 0); in io_epoll_ctl()
4417 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4422 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4425 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4426 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4427 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4434 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
4437 struct io_madvise *ma = &req->madvise; in io_madvise()
4445 req_set_fail(req); in io_madvise()
4446 io_req_complete(req, ret); in io_madvise()
4453 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4457 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4460 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4461 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4462 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4466 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
4468 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4482 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4484 req_set_fail(req); in io_fadvise()
4485 __io_req_complete(req, issue_flags, ret, 0); in io_fadvise()
4489 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4491 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4495 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4498 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4499 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4500 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4501 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4502 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4507 static int io_statx(struct io_kiocb *req, unsigned int issue_flags) in io_statx() argument
4509 struct io_statx *ctx = &req->statx; in io_statx()
4519 req_set_fail(req); in io_statx()
4520 io_req_complete(req, ret); in io_statx()
4524 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4526 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4531 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4534 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4535 req->close.file_slot = READ_ONCE(sqe->file_index); in io_close_prep()
4536 if (req->close.file_slot && req->close.fd) in io_close_prep()
4542 static int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close() argument
4545 struct io_close *close = &req->close; in io_close()
4550 if (req->close.file_slot) { in io_close()
4551 ret = io_close_fixed(req, issue_flags); in io_close()
4586 req_set_fail(req); in io_close()
4589 __io_req_complete(req, issue_flags, ret, 0); in io_close()
4593 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
4595 struct io_ring_ctx *ctx = req->ctx; in io_sfr_prep()
4603 req->sync.off = READ_ONCE(sqe->off); in io_sfr_prep()
4604 req->sync.len = READ_ONCE(sqe->len); in io_sfr_prep()
4605 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4609 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
4617 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4618 req->sync.flags); in io_sync_file_range()
4620 req_set_fail(req); in io_sync_file_range()
4621 io_req_complete(req, ret); in io_sync_file_range()
4626 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4629 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4633 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4637 async_msg = req->async_data; in io_setup_async_msg()
4638 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4649 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4654 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4655 req->sr_msg.msg_flags, &iomsg->free_iov); in io_sendmsg_copy_hdr()
4658 static int io_sendmsg_prep_async(struct io_kiocb *req) in io_sendmsg_prep_async() argument
4662 ret = io_sendmsg_copy_hdr(req, req->async_data); in io_sendmsg_prep_async()
4664 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4668 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4670 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4672 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4683 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4686 if (req->ctx->compat) in io_sendmsg_prep()
4692 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
4700 sock = sock_from_file(req->file, &ret); in io_sendmsg()
4704 kmsg = req->async_data; in io_sendmsg()
4706 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4712 flags = req->sr_msg.msg_flags; in io_sendmsg()
4720 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4727 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4729 req_set_fail(req); in io_sendmsg()
4730 __io_req_complete(req, issue_flags, ret, 0); in io_sendmsg()
4734 static int io_send(struct io_kiocb *req, unsigned int issue_flags) in io_send() argument
4736 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4744 sock = sock_from_file(req->file, &ret); in io_send()
4757 flags = req->sr_msg.msg_flags; in io_send()
4771 req_set_fail(req); in io_send()
4772 __io_req_complete(req, issue_flags, ret, 0); in io_send()
4776 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4779 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4789 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4809 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4812 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4824 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4850 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4856 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4857 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4860 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4863 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4866 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4869 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4874 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4878 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4880 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4883 static int io_recvmsg_prep_async(struct io_kiocb *req) in io_recvmsg_prep_async() argument
4887 ret = io_recvmsg_copy_hdr(req, req->async_data); in io_recvmsg_prep_async()
4889 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
4893 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
4895 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4897 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
4909 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
4912 if (req->ctx->compat) in io_recvmsg_prep()
4918 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) in io_recvmsg() argument
4928 sock = sock_from_file(req->file, &ret); in io_recvmsg()
4932 kmsg = req->async_data; in io_recvmsg()
4934 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
4940 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
4941 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
4945 kmsg->fast_iov[0].iov_len = req->sr_msg.len; in io_recvmsg()
4947 1, req->sr_msg.len); in io_recvmsg()
4950 flags = req->sr_msg.msg_flags; in io_recvmsg()
4956 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
4959 return io_setup_async_msg(req, kmsg); in io_recvmsg()
4963 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
4964 cflags = io_put_recv_kbuf(req); in io_recvmsg()
4968 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
4970 req_set_fail(req); in io_recvmsg()
4971 __io_req_complete(req, issue_flags, ret, cflags); in io_recvmsg()
4975 static int io_recv(struct io_kiocb *req, unsigned int issue_flags) in io_recv() argument
4978 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
4988 sock = sock_from_file(req->file, &ret); in io_recv()
4992 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
4993 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
5010 flags = req->sr_msg.msg_flags; in io_recv()
5022 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5023 cflags = io_put_recv_kbuf(req); in io_recv()
5025 req_set_fail(req); in io_recv()
5026 __io_req_complete(req, issue_flags, ret, cflags); in io_recv()
5030 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
5032 struct io_accept *accept = &req->accept; in io_accept_prep()
5034 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5054 static int io_accept(struct io_kiocb *req, unsigned int issue_flags) in io_accept() argument
5056 struct io_accept *accept = &req->accept; in io_accept()
5063 if (req->file->f_flags & O_NONBLOCK) in io_accept()
5064 req->flags |= REQ_F_NOWAIT; in io_accept()
5071 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, in io_accept()
5082 req_set_fail(req); in io_accept()
5087 ret = io_install_fixed_file(req, file, issue_flags, in io_accept()
5090 __io_req_complete(req, issue_flags, ret, 0); in io_accept()
5094 static int io_connect_prep_async(struct io_kiocb *req) in io_connect_prep_async() argument
5096 struct io_async_connect *io = req->async_data; in io_connect_prep_async()
5097 struct io_connect *conn = &req->connect; in io_connect_prep_async()
5102 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
5104 struct io_connect *conn = &req->connect; in io_connect_prep()
5106 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5117 static int io_connect(struct io_kiocb *req, unsigned int issue_flags) in io_connect() argument
5124 if (req->async_data) { in io_connect()
5125 io = req->async_data; in io_connect()
5127 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
5128 req->connect.addr_len, in io_connect()
5137 ret = __sys_connect_file(req->file, &io->address, in io_connect()
5138 req->connect.addr_len, file_flags); in io_connect()
5140 if (req->async_data) in io_connect()
5142 if (io_alloc_async_data(req)) { in io_connect()
5146 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
5153 req_set_fail(req); in io_connect()
5154 __io_req_complete(req, issue_flags, ret, 0); in io_connect()
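
io_connect() tries the connect non-blocking first; if it cannot finish immediately it copies the in-progress state into req->async_data (hence move_addr_to_kernel() and io_connect_prep_async() above) so the async retry never re-reads user memory. From userspace it is a single SQE. A sketch, assuming `sockfd` is a TCP socket created with socket(2):

    #include <liburing.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <string.h>

    /* Queue one IORING_OP_CONNECT; the completion res is 0 or -errno. */
    static int connect_async(struct io_uring *ring, int sockfd,
                             const char *ip, unsigned short port)
    {
            struct sockaddr_in addr;
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct io_uring_cqe *cqe;
            int ret;

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_port = htons(port);
            inet_pton(AF_INET, ip, &addr.sin_addr);

            io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
                                  sizeof(addr));
            io_uring_submit(ring);
            io_uring_wait_cqe(ring, &cqe);
            ret = cqe->res;
            io_uring_cqe_seen(ring, cqe);
            return ret;
    }
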
5159 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5166 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5173 static int io_##op##_prep_async(struct io_kiocb *req) \
5188 struct io_kiocb *req; member
5203 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
5212 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
5215 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
5224 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
5226 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
5227 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
5228 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
5231 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
5233 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
5236 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
5239 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
5240 return req->async_data; in io_poll_get_double()
5241 return req->apoll->double_poll; in io_poll_get_double()
5244 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
5246 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
5247 return &req->poll; in io_poll_get_single()
5248 return &req->apoll->poll; in io_poll_get_single()
5251 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5253 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5256 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5257 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5283 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
5285 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_remove_entries()
5286 struct io_poll_iocb *poll_double = io_poll_get_double(req); in io_poll_remove_entries()
5316 * the request, then the mask is stored in req->result.
5318 static int io_poll_check_events(struct io_kiocb *req) in io_poll_check_events() argument
5320 struct io_ring_ctx *ctx = req->ctx; in io_poll_check_events()
5321 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_check_events()
5324 /* req->task == current here, checking PF_EXITING is safe */ in io_poll_check_events()
5325 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
5326 io_poll_mark_cancelled(req); in io_poll_check_events()
5329 v = atomic_read(&req->poll_refs); in io_poll_check_events()
5342 req->result = 0; in io_poll_check_events()
5344 req->result = 0; in io_poll_check_events()
5350 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
5354 if (!req->result) { in io_poll_check_events()
5357 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_check_events()
5361 if (req->result && !(poll->events & EPOLLONESHOT)) { in io_poll_check_events()
5362 __poll_t mask = mangle_poll(req->result & poll->events); in io_poll_check_events()
5366 filled = io_fill_cqe_aux(ctx, req->user_data, mask, in io_poll_check_events()
5373 } else if (req->result) { in io_poll_check_events()
5378 req->result = 0; in io_poll_check_events()
5384 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & in io_poll_check_events()
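
io_poll_get_ownership() and the loop closing io_poll_check_events() implement a small ownership protocol on req->poll_refs: the low bits count pending wakeups, whoever bumps the count off zero owns the request and must keep processing events until it can drop everything it saw back to zero, and extra bits (cancel, retry) are folded into the same word. A minimal userspace sketch of the same idiom using C11 atomics; the names and bit layout here are illustrative, not the kernel's exact constants.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define POLL_REF_MASK    0x0fffffffu  /* illustrative: low bits = pending count */
    #define POLL_CANCEL_FLAG 0x80000000u  /* illustrative cancel marker */

    struct poll_req {
            atomic_uint refs;
    };

    /* Whoever moves the count off zero wins ownership and must run the handler. */
    static bool poll_get_ownership(struct poll_req *r)
    {
            return !(atomic_fetch_add(&r->refs, 1) & POLL_REF_MASK);
    }

    /* Owner-side loop: handle events, then try to drop every reference that was
     * visible at the top of the iteration.  If new wakeups arrived meanwhile the
     * count stays non-zero, ownership is retained and the loop runs again. */
    static void poll_owner_run(struct poll_req *r,
                               void (*handle)(struct poll_req *))
    {
            unsigned int v;

            do {
                    v = atomic_load(&r->refs);
                    if (v & POLL_CANCEL_FLAG)
                            break;          /* cancelled: caller tears it down */
                    handle(r);
            } while (((atomic_fetch_sub(&r->refs, v & POLL_REF_MASK)
                       - (v & POLL_REF_MASK)) & POLL_REF_MASK));
    }
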
5390 static void io_poll_task_func(struct io_kiocb *req, bool *locked) in io_poll_task_func() argument
5392 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
5395 ret = io_poll_check_events(req); in io_poll_task_func()
5400 req->result = mangle_poll(req->result & req->poll.events); in io_poll_task_func()
5402 req->result = ret; in io_poll_task_func()
5403 req_set_fail(req); in io_poll_task_func()
5406 io_poll_remove_entries(req); in io_poll_task_func()
5408 hash_del(&req->hash_node); in io_poll_task_func()
5410 io_req_complete_post(req, req->result, 0); in io_poll_task_func()
5413 static void io_apoll_task_func(struct io_kiocb *req, bool *locked) in io_apoll_task_func() argument
5415 struct io_ring_ctx *ctx = req->ctx; in io_apoll_task_func()
5418 ret = io_poll_check_events(req); in io_apoll_task_func()
5422 io_poll_remove_entries(req); in io_apoll_task_func()
5424 hash_del(&req->hash_node); in io_apoll_task_func()
5428 io_req_task_submit(req, locked); in io_apoll_task_func()
5430 io_req_complete_failed(req, ret); in io_apoll_task_func()
5433 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
5435 req->result = mask; in __io_poll_execute()
5436 if (req->opcode == IORING_OP_POLL_ADD) in __io_poll_execute()
5437 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
5439 req->io_task_work.func = io_apoll_task_func; in __io_poll_execute()
5441 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_poll_execute()
5442 io_req_task_work_add(req); in __io_poll_execute()
5445 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
5447 if (io_poll_get_ownership(req)) in io_poll_execute()
5448 __io_poll_execute(req, res); in io_poll_execute()
5451 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
5453 io_poll_mark_cancelled(req); in io_poll_cancel_req()
5455 io_poll_execute(req, 0); in io_poll_cancel_req()
5461 struct io_kiocb *req = wait->private; in io_poll_wake() local
5467 io_poll_mark_cancelled(req); in io_poll_wake()
5469 io_poll_execute(req, 0); in io_poll_wake()
5482 * as req->head is NULL'ed out, the request can be in io_poll_wake()
5494 if (io_poll_get_ownership(req)) { in io_poll_wake()
5503 __io_poll_execute(req, mask); in io_poll_wake()
5512 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5544 poll->wait.private = req; in __io_queue_proc()
5557 __io_queue_proc(&pt->req->poll, pt, head, in io_poll_queue_proc()
5558 (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5561 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5565 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5567 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5569 poll->file = req->file; in __io_arm_poll_handler()
5570 poll->wait.private = req; in __io_arm_poll_handler()
5573 ipt->req = req; in __io_arm_poll_handler()
5581 atomic_set(&req->poll_refs, 1); in __io_arm_poll_handler()
5582 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5585 io_poll_remove_entries(req); in __io_arm_poll_handler()
5586 /* no one else has access to the req, forget about the ref */ in __io_arm_poll_handler()
5590 io_poll_remove_entries(req); in __io_arm_poll_handler()
5597 io_poll_req_insert(req); in __io_arm_poll_handler()
5606 __io_poll_execute(req, mask); in __io_arm_poll_handler()
5614 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
5615 __io_poll_execute(req, 0); in __io_arm_poll_handler()
5623 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5634 static int io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5636 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5637 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5643 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5645 if (req->flags & REQ_F_POLLED) in io_arm_poll_handler()
5654 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5655 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5665 req->apoll = apoll; in io_arm_poll_handler()
5666 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5669 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); in io_arm_poll_handler()
5673 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, in io_arm_poll_handler()
5685 struct io_kiocb *req; in io_poll_remove_all() local
5694 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5695 if (io_match_task_safe(req, tsk, cancel_all)) { in io_poll_remove_all()
5696 hlist_del_init(&req->hash_node); in io_poll_remove_all()
5697 io_poll_cancel_req(req); in io_poll_remove_all()
5711 struct io_kiocb *req; in io_poll_find() local
5714 hlist_for_each_entry(req, list, hash_node) { in io_poll_find()
5715 if (sqe_addr != req->user_data) in io_poll_find()
5717 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
5719 return req; in io_poll_find()
5724 static bool io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
5727 if (!io_poll_get_ownership(req)) in io_poll_disarm()
5729 io_poll_remove_entries(req); in io_poll_disarm()
5730 hash_del(&req->hash_node); in io_poll_disarm()
5738 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only); in io_poll_cancel() local
5740 if (!req) in io_poll_cancel()
5742 io_poll_cancel_req(req); in io_poll_cancel()
5760 static int io_poll_update_prep(struct io_kiocb *req, in io_poll_update_prep() argument
5763 struct io_poll_update *upd = &req->poll_update; in io_poll_update_prep()
5766 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
5793 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5795 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5798 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5806 io_req_set_refcount(req); in io_poll_add_prep()
5811 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
5813 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5819 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); in io_poll_add()
5821 req_set_fail(req); in io_poll_add()
5824 __io_req_complete(req, issue_flags, ret, 0); in io_poll_add()
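
io_poll_add() arms a one-shot poll through the same __io_arm_poll_handler() used for internal async polling; on completion the ready mask (mangle_poll()-encoded, i.e. the usual POLLIN/POLLOUT values) is posted in cqe->res. A liburing sketch that waits for readability on `fd`:

    #include <liburing.h>
    #include <poll.h>

    /* One-shot IORING_OP_POLL_ADD: completes once fd is readable or on error. */
    static int wait_readable(struct io_uring *ring, int fd)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct io_uring_cqe *cqe;
            int revents;

            io_uring_prep_poll_add(sqe, fd, POLLIN);
            io_uring_sqe_set_data(sqe, (void *)(long)fd);  /* user_data, usable
                                                              for cancel/update */
            io_uring_submit(ring);

            io_uring_wait_cqe(ring, &cqe);
            revents = cqe->res;     /* ready mask such as POLLIN, or -errno */
            io_uring_cqe_seen(ring, cqe);
            return revents;
    }

The user_data set here is what IORING_OP_POLL_REMOVE and the io_poll_update()/io_poll_cancel() paths below key on when looking the request up in the cancel hash.
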
5828 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) in io_poll_update() argument
5830 struct io_ring_ctx *ctx = req->ctx; in io_poll_update()
5835 preq = io_poll_find(ctx, req->poll_update.old_user_data, true); in io_poll_update()
5843 if (req->poll_update.update_events || req->poll_update.update_user_data) { in io_poll_update()
5845 if (req->poll_update.update_events) { in io_poll_update()
5847 preq->poll.events |= req->poll_update.events & 0xffff; in io_poll_update()
5850 if (req->poll_update.update_user_data) in io_poll_update()
5851 preq->user_data = req->poll_update.new_user_data; in io_poll_update()
5862 req_set_fail(req); in io_poll_update()
5864 io_req_complete(req, ret); in io_poll_update()
5868 static void io_req_task_timeout(struct io_kiocb *req, bool *locked) in io_req_task_timeout() argument
5870 req_set_fail(req); in io_req_task_timeout()
5871 io_req_complete_post(req, -ETIME, 0); in io_req_task_timeout()
5878 struct io_kiocb *req = data->req; in io_timeout_fn() local
5879 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
5883 list_del_init(&req->timeout.list); in io_timeout_fn()
5884 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
5885 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
5888 req->io_task_work.func = io_req_task_timeout; in io_timeout_fn()
5889 io_req_task_work_add(req); in io_timeout_fn()
5898 struct io_kiocb *req; in io_timeout_extract() local
5901 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_extract()
5902 found = user_data == req->user_data; in io_timeout_extract()
5909 io = req->async_data; in io_timeout_extract()
5912 list_del_init(&req->timeout.list); in io_timeout_extract()
5913 return req; in io_timeout_extract()
5920 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_cancel() local
5922 if (IS_ERR(req)) in io_timeout_cancel()
5923 return PTR_ERR(req); in io_timeout_cancel()
5925 req_set_fail(req); in io_timeout_cancel()
5926 io_fill_cqe_req(req, -ECANCELED, 0); in io_timeout_cancel()
5927 io_put_req_deferred(req); in io_timeout_cancel()
5952 struct io_kiocb *req; in io_linked_timeout_update() local
5955 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { in io_linked_timeout_update()
5956 found = user_data == req->user_data; in io_linked_timeout_update()
5963 io = req->async_data; in io_linked_timeout_update()
5976 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_update() local
5979 if (IS_ERR(req)) in io_timeout_update()
5980 return PTR_ERR(req); in io_timeout_update()
5982 req->timeout.off = 0; /* noseq */ in io_timeout_update()
5983 data = req->async_data; in io_timeout_update()
5984 list_add_tail(&req->timeout.list, &ctx->timeout_list); in io_timeout_update()
5991 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
5994 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove_prep()
5996 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
5998 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6032 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) in io_timeout_remove() argument
6034 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove()
6035 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
6038 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6056 req_set_fail(req); in io_timeout_remove()
6057 io_req_complete_post(req, ret, 0); in io_timeout_remove()
6061 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
6068 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6082 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6083 req->timeout.off = off; in io_timeout_prep()
6084 if (unlikely(off && !req->ctx->off_timeout_used)) in io_timeout_prep()
6085 req->ctx->off_timeout_used = true; in io_timeout_prep()
6087 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
6090 data = req->async_data; in io_timeout_prep()
6091 data->req = req; in io_timeout_prep()
6097 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6102 struct io_submit_link *link = &req->ctx->submit_state.link; in io_timeout_prep()
6108 req->timeout.head = link->last; in io_timeout_prep()
6114 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) in io_timeout() argument
6116 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
6117 struct io_timeout_data *data = req->async_data; in io_timeout()
6119 u32 tail, off = req->timeout.off; in io_timeout()
6128 if (io_is_timeout_noseq(req)) { in io_timeout()
6134 req->timeout.target_seq = tail + off; in io_timeout()
6157 list_add(&req->timeout.list, entry); in io_timeout()
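
io_timeout() inserts the request into ctx->timeout_list keyed by target_seq = tail + off, so a timeout can fire either when the timer expires or once `off` completions have been posted; off == 0 (io_is_timeout_noseq()) makes it purely time based. A liburing sketch of both variants; the timespec values are illustrative and must stay live until the timeout completes, hence the statics.

    #include <liburing.h>

    /* Pure timer: completes with -ETIME after 2.5 seconds. */
    static void queue_plain_timeout(struct io_uring *ring)
    {
            static struct __kernel_timespec ts = { .tv_sec = 2,
                                                   .tv_nsec = 500000000 };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            io_uring_prep_timeout(sqe, &ts, 0, 0);
    }

    /* Completion-count timeout: completes early (res == 0) once 8 other CQEs
     * have been posted, matching the target_seq bookkeeping in io_timeout(). */
    static void queue_counted_timeout(struct io_uring *ring)
    {
            static struct __kernel_timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            io_uring_prep_timeout(sqe, &ts, 8, 0);
    }
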
6171 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
6174 return req->ctx == cd->ctx && req->user_data == cd->user_data; in io_cancel_cb()
6203 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) in io_try_cancel_userdata() argument
6205 struct io_ring_ctx *ctx = req->ctx; in io_try_cancel_userdata()
6208 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); in io_try_cancel_userdata()
6210 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); in io_try_cancel_userdata()
6226 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
6229 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6231 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6237 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
6241 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
6243 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
6244 u64 sqe_addr = req->cancel.addr; in io_async_cancel()
6248 ret = io_try_cancel_userdata(req, sqe_addr); in io_async_cancel()
6258 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
6265 req_set_fail(req); in io_async_cancel()
6266 io_req_complete_post(req, ret, 0); in io_async_cancel()
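
io_async_cancel() resolves its target by user_data, first through io_try_cancel_userdata() on the submitting task's io-wq, then via the poll and timeout lookups; the cancel request itself completes with 0, -ENOENT or -EALREADY. From userspace the matching opcode is IORING_OP_ASYNC_CANCEL. A hedged sketch; note that older liburing takes the target user_data as a void * (newer releases add a 64-bit variant), which is what the cast below assumes.

    #include <liburing.h>

    /* Ask the kernel to cancel whichever request was submitted with `target`
     * as its user_data; the cancel's own CQE carries 0/-ENOENT/-EALREADY. */
    static void queue_cancel(struct io_uring *ring, unsigned long target)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            io_uring_prep_cancel(sqe, (void *)target, 0);
            io_uring_sqe_set_data(sqe, NULL);   /* user_data of the cancel itself */
    }
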
6270 static int io_rsrc_update_prep(struct io_kiocb *req, in io_rsrc_update_prep() argument
6273 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6278 req->rsrc_update.offset = READ_ONCE(sqe->off); in io_rsrc_update_prep()
6279 req->rsrc_update.nr_args = READ_ONCE(sqe->len); in io_rsrc_update_prep()
6280 if (!req->rsrc_update.nr_args) in io_rsrc_update_prep()
6282 req->rsrc_update.arg = READ_ONCE(sqe->addr); in io_rsrc_update_prep()
6286 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) in io_files_update() argument
6288 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
6292 up.offset = req->rsrc_update.offset; in io_files_update()
6293 up.data = req->rsrc_update.arg; in io_files_update()
6301 &up, req->rsrc_update.nr_args); in io_files_update()
6305 req_set_fail(req); in io_files_update()
6306 __io_req_complete(req, issue_flags, ret, 0); in io_files_update()
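
io_files_update() feeds IORING_OP_FILES_UPDATE into the same resource-update machinery that io_uring_register(2) uses, so a registered-file table can be patched from the SQ ring itself. A sketch that swaps slot 1 of an already-registered table; `new_fd` is assumed to be an open descriptor.

    #include <liburing.h>

    /* Replace slot 1 of the registered file table with new_fd via an SQE
     * rather than a separate io_uring_register(2) call. */
    static int update_slot_one(struct io_uring *ring, int new_fd)
    {
            int fds[1] = { new_fd };
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct io_uring_cqe *cqe;
            int ret;

            io_uring_prep_files_update(sqe, fds, 1, 1);  /* 1 fd, at offset 1 */
            io_uring_submit(ring);
            io_uring_wait_cqe(ring, &cqe);
            ret = cqe->res;          /* number of files updated, or -errno */
            io_uring_cqe_seen(ring, cqe);
            return ret;
    }
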
6310 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
6312 switch (req->opcode) { in io_req_prep()
6318 return io_read_prep(req, sqe); in io_req_prep()
6322 return io_write_prep(req, sqe); in io_req_prep()
6324 return io_poll_add_prep(req, sqe); in io_req_prep()
6326 return io_poll_update_prep(req, sqe); in io_req_prep()
6328 return io_fsync_prep(req, sqe); in io_req_prep()
6330 return io_sfr_prep(req, sqe); in io_req_prep()
6333 return io_sendmsg_prep(req, sqe); in io_req_prep()
6336 return io_recvmsg_prep(req, sqe); in io_req_prep()
6338 return io_connect_prep(req, sqe); in io_req_prep()
6340 return io_timeout_prep(req, sqe, false); in io_req_prep()
6342 return io_timeout_remove_prep(req, sqe); in io_req_prep()
6344 return io_async_cancel_prep(req, sqe); in io_req_prep()
6346 return io_timeout_prep(req, sqe, true); in io_req_prep()
6348 return io_accept_prep(req, sqe); in io_req_prep()
6350 return io_fallocate_prep(req, sqe); in io_req_prep()
6352 return io_openat_prep(req, sqe); in io_req_prep()
6354 return io_close_prep(req, sqe); in io_req_prep()
6356 return io_rsrc_update_prep(req, sqe); in io_req_prep()
6358 return io_statx_prep(req, sqe); in io_req_prep()
6360 return io_fadvise_prep(req, sqe); in io_req_prep()
6362 return io_madvise_prep(req, sqe); in io_req_prep()
6364 return io_openat2_prep(req, sqe); in io_req_prep()
6366 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
6368 return io_splice_prep(req, sqe); in io_req_prep()
6370 return io_provide_buffers_prep(req, sqe); in io_req_prep()
6372 return io_remove_buffers_prep(req, sqe); in io_req_prep()
6374 return io_tee_prep(req, sqe); in io_req_prep()
6376 return io_shutdown_prep(req, sqe); in io_req_prep()
6378 return io_renameat_prep(req, sqe); in io_req_prep()
6380 return io_unlinkat_prep(req, sqe); in io_req_prep()
6384 req->opcode); in io_req_prep()
6388 static int io_req_prep_async(struct io_kiocb *req) in io_req_prep_async() argument
6390 if (!io_op_defs[req->opcode].needs_async_setup) in io_req_prep_async()
6392 if (WARN_ON_ONCE(req->async_data)) in io_req_prep_async()
6394 if (io_alloc_async_data(req)) in io_req_prep_async()
6397 switch (req->opcode) { in io_req_prep_async()
6399 return io_rw_prep_async(req, READ); in io_req_prep_async()
6401 return io_rw_prep_async(req, WRITE); in io_req_prep_async()
6403 return io_sendmsg_prep_async(req); in io_req_prep_async()
6405 return io_recvmsg_prep_async(req); in io_req_prep_async()
6407 return io_connect_prep_async(req); in io_req_prep_async()
6410 req->opcode); in io_req_prep_async()
6414 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
6416 u32 seq = req->ctx->cached_sq_head; in io_get_sequence()
6418 /* need original cached_sq_head, but it was increased for each req */ in io_get_sequence()
6419 io_for_each_link(req, req) in io_get_sequence()
6424 static bool io_drain_req(struct io_kiocb *req) in io_drain_req() argument
6427 struct io_ring_ctx *ctx = req->ctx; in io_drain_req()
6432 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6433 io_req_complete_fail_submit(req); in io_drain_req()
6444 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6448 io_for_each_link(pos, req->link) { in io_drain_req()
6451 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6456 /* Still need defer if there is pending req in defer list. */ in io_drain_req()
6459 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6466 seq = io_get_sequence(req); in io_drain_req()
6468 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_drain_req()
6471 ret = io_req_prep_async(req); in io_drain_req()
6474 io_prep_async_link(req); in io_drain_req()
6479 io_req_complete_failed(req, ret); in io_drain_req()
6484 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_drain_req()
6487 io_queue_async_work(req, NULL); in io_drain_req()
6491 trace_io_uring_defer(ctx, req, req->user_data); in io_drain_req()
6492 de->req = req; in io_drain_req()
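
io_drain_req() only defers when it has to: the request (or a member of its link) carries IOSQE_IO_DRAIN and requests ahead of it in the sequence have not yet completed, in which case the prepared request is parked on ctx->defer_list. From userspace the flag acts as a barrier: a drained SQE is not issued until every earlier submission has completed. A sketch; the buffers `a` and `b` must stay valid until their writes complete.

    #include <liburing.h>

    /* Write two buffers, then fsync only after both writes have completed,
     * using IOSQE_IO_DRAIN as an ordering barrier (the io_drain_req() path). */
    static void write_write_barrier_fsync(struct io_uring *ring, int fd,
                                          const void *a, const void *b,
                                          unsigned len)
    {
            struct io_uring_sqe *sqe;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_write(sqe, fd, a, len, 0);

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_write(sqe, fd, b, len, len);

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_fsync(sqe, fd, 0);
            sqe->flags |= IOSQE_IO_DRAIN;  /* held back until earlier SQEs finish */

            io_uring_submit(ring);
    }
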
6499 static void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
6501 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6502 switch (req->opcode) { in io_clean_op()
6506 kfree((void *)(unsigned long)req->rw.addr); in io_clean_op()
6510 kfree(req->sr_msg.kbuf); in io_clean_op()
6515 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6516 switch (req->opcode) { in io_clean_op()
6523 struct io_async_rw *io = req->async_data; in io_clean_op()
6530 struct io_async_msghdr *io = req->async_data; in io_clean_op()
6537 if (req->open.filename) in io_clean_op()
6538 putname(req->open.filename); in io_clean_op()
6541 putname(req->rename.oldpath); in io_clean_op()
6542 putname(req->rename.newpath); in io_clean_op()
6545 putname(req->unlink.filename); in io_clean_op()
6549 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6550 kfree(req->apoll->double_poll); in io_clean_op()
6551 kfree(req->apoll); in io_clean_op()
6552 req->apoll = NULL; in io_clean_op()
6554 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6555 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op()
6559 if (req->flags & REQ_F_CREDS) in io_clean_op()
6560 put_cred(req->creds); in io_clean_op()
6562 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6565 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) in io_issue_sqe() argument
6567 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
6571 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
6572 creds = override_creds(req->creds); in io_issue_sqe()
6574 switch (req->opcode) { in io_issue_sqe()
6576 ret = io_nop(req, issue_flags); in io_issue_sqe()
6581 ret = io_read(req, issue_flags); in io_issue_sqe()
6586 ret = io_write(req, issue_flags); in io_issue_sqe()
6589 ret = io_fsync(req, issue_flags); in io_issue_sqe()
6592 ret = io_poll_add(req, issue_flags); in io_issue_sqe()
6595 ret = io_poll_update(req, issue_flags); in io_issue_sqe()
6598 ret = io_sync_file_range(req, issue_flags); in io_issue_sqe()
6601 ret = io_sendmsg(req, issue_flags); in io_issue_sqe()
6604 ret = io_send(req, issue_flags); in io_issue_sqe()
6607 ret = io_recvmsg(req, issue_flags); in io_issue_sqe()
6610 ret = io_recv(req, issue_flags); in io_issue_sqe()
6613 ret = io_timeout(req, issue_flags); in io_issue_sqe()
6616 ret = io_timeout_remove(req, issue_flags); in io_issue_sqe()
6619 ret = io_accept(req, issue_flags); in io_issue_sqe()
6622 ret = io_connect(req, issue_flags); in io_issue_sqe()
6625 ret = io_async_cancel(req, issue_flags); in io_issue_sqe()
6628 ret = io_fallocate(req, issue_flags); in io_issue_sqe()
6631 ret = io_openat(req, issue_flags); in io_issue_sqe()
6634 ret = io_close(req, issue_flags); in io_issue_sqe()
6637 ret = io_files_update(req, issue_flags); in io_issue_sqe()
6640 ret = io_statx(req, issue_flags); in io_issue_sqe()
6643 ret = io_fadvise(req, issue_flags); in io_issue_sqe()
6646 ret = io_madvise(req, issue_flags); in io_issue_sqe()
6649 ret = io_openat2(req, issue_flags); in io_issue_sqe()
6652 ret = io_epoll_ctl(req, issue_flags); in io_issue_sqe()
6655 ret = io_splice(req, issue_flags); in io_issue_sqe()
6658 ret = io_provide_buffers(req, issue_flags); in io_issue_sqe()
6661 ret = io_remove_buffers(req, issue_flags); in io_issue_sqe()
6664 ret = io_tee(req, issue_flags); in io_issue_sqe()
6667 ret = io_shutdown(req, issue_flags); in io_issue_sqe()
6670 ret = io_renameat(req, issue_flags); in io_issue_sqe()
6673 ret = io_unlinkat(req, issue_flags); in io_issue_sqe()
6685 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
6686 io_iopoll_req_issued(req); in io_issue_sqe()
6693 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_free_work() local
6695 req = io_put_req_find_next(req); in io_wq_free_work()
6696 return req ? &req->work : NULL; in io_wq_free_work()
6701 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6706 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
6707 __io_req_set_refcount(req, 2); in io_wq_submit_work()
6709 req_ref_get(req); in io_wq_submit_work()
6711 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6721 ret = io_issue_sqe(req, 0); in io_wq_submit_work()
6727 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_wq_submit_work()
6735 io_req_task_queue_fail(req, ret); in io_wq_submit_work()
6766 struct io_kiocb *req, int fd) in io_file_get_fixed() argument
6778 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
6779 io_req_set_rsrc_node(req); in io_file_get_fixed()
6784 struct io_kiocb *req, int fd) in io_file_get_normal() argument
6792 io_req_track_inflight(req); in io_file_get_normal()
6797 struct io_kiocb *req, int fd, bool fixed) in io_file_get() argument
6800 return io_file_get_fixed(ctx, req, fd); in io_file_get()
6802 return io_file_get_normal(ctx, req, fd); in io_file_get()
6805 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) in io_req_task_link_timeout() argument
6807 struct io_kiocb *prev = req->timeout.prev; in io_req_task_link_timeout()
6811 if (!(req->task->flags & PF_EXITING)) in io_req_task_link_timeout()
6812 ret = io_try_cancel_userdata(req, prev->user_data); in io_req_task_link_timeout()
6813 io_req_complete_post(req, ret ?: -ETIME, 0); in io_req_task_link_timeout()
6816 io_req_complete_post(req, -ETIME, 0); in io_req_task_link_timeout()
6824 struct io_kiocb *prev, *req = data->req; in io_link_timeout_fn() local
6825 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6829 prev = req->timeout.head; in io_link_timeout_fn()
6830 req->timeout.head = NULL; in io_link_timeout_fn()
6841 list_del(&req->timeout.list); in io_link_timeout_fn()
6842 req->timeout.prev = prev; in io_link_timeout_fn()
6845 req->io_task_work.func = io_req_task_link_timeout; in io_link_timeout_fn()
6846 io_req_task_work_add(req); in io_link_timeout_fn()
6850 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6852 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
6859 if (req->timeout.head) { in io_queue_linked_timeout()
6860 struct io_timeout_data *data = req->async_data; in io_queue_linked_timeout()
6865 list_add_tail(&req->timeout.list, &ctx->ltimeout_list); in io_queue_linked_timeout()
6869 io_put_req(req); in io_queue_linked_timeout()
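
io_queue_linked_timeout() only starts the hrtimer for an IORING_OP_LINK_TIMEOUT once its head request has actually been queued; if the head has already completed the timer is simply dropped (io_put_req()), otherwise io_link_timeout_fn()/io_req_task_link_timeout() cancel the head, which then completes with -ECANCELED. From userspace the timeout is linked directly behind the operation it guards. A sketch:

    #include <liburing.h>

    /* Read with a 1-second deadline: if the read has not completed in time,
     * the linked timeout cancels it and the read CQE reports -ECANCELED. */
    static void read_with_deadline(struct io_uring *ring, int fd,
                                   void *buf, unsigned len)
    {
            static struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
            struct io_uring_sqe *sqe;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_read(sqe, fd, buf, len, 0);
            sqe->flags |= IOSQE_IO_LINK;       /* next SQE is chained to this one */

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_link_timeout(sqe, &ts, 0);

            io_uring_submit(ring);
    }
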
6872 static void __io_queue_sqe(struct io_kiocb *req) in __io_queue_sqe() argument
6873 __must_hold(&req->ctx->uring_lock) in __io_queue_sqe()
6879 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); in __io_queue_sqe()
6886 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
6887 struct io_ring_ctx *ctx = req->ctx; in __io_queue_sqe()
6890 state->compl_reqs[state->compl_nr++] = req; in __io_queue_sqe()
6896 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6899 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
6900 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6902 switch (io_arm_poll_handler(req)) { in __io_queue_sqe()
6912 io_queue_async_work(req, NULL); in __io_queue_sqe()
6919 io_req_complete_failed(req, ret); in __io_queue_sqe()
6923 static inline void io_queue_sqe(struct io_kiocb *req) in io_queue_sqe() argument
6924 __must_hold(&req->ctx->uring_lock) in io_queue_sqe()
6926 if (unlikely(req->ctx->drain_active) && io_drain_req(req)) in io_queue_sqe()
6929 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
6930 __io_queue_sqe(req); in io_queue_sqe()
6931 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
6932 io_req_complete_fail_submit(req); in io_queue_sqe()
6934 int ret = io_req_prep_async(req); in io_queue_sqe()
6937 io_req_complete_failed(req, ret); in io_queue_sqe()
6939 io_queue_async_work(req, NULL); in io_queue_sqe()
6949 struct io_kiocb *req, in io_check_restriction() argument
6955 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
6969 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
6977 /* req is partially pre-initialised, see io_preinit_req() */ in io_init_req()
6978 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
6980 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
6981 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
6982 req->file = NULL; in io_init_req()
6983 req->fixed_rsrc_refs = NULL; in io_init_req()
6984 req->task = current; in io_init_req()
6989 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
6991 if (!io_check_restriction(ctx, req, sqe_flags)) in io_init_req()
6995 !io_op_defs[req->opcode].buffer_select) in io_init_req()
7002 req->creds = xa_load(&ctx->personalities, personality); in io_init_req()
7003 if (!req->creds) in io_init_req()
7005 get_cred(req->creds); in io_init_req()
7006 req->flags |= REQ_F_CREDS; in io_init_req()
7015 io_op_defs[req->opcode].plug) { in io_init_req()
7020 if (io_op_defs[req->opcode].needs_file) { in io_init_req()
7021 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), in io_init_req()
7023 if (unlikely(!req->file)) in io_init_req()
7031 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_submit_sqe() argument
7038 ret = io_init_req(ctx, req, sqe); in io_submit_sqe()
7044 * we can judge a link req is failed or cancelled by if in io_submit_sqe()
7046 * it may be set REQ_F_FAIL because of other req's failure in io_submit_sqe()
7047 * so let's leverage req->result to distinguish if a head in io_submit_sqe()
7048 * is set REQ_F_FAIL because of its failure or other req's in io_submit_sqe()
7054 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7056 * the current req is a normal req, we should return in io_submit_sqe()
7059 io_req_complete_failed(req, ret); in io_submit_sqe()
7062 req_fail_link_node(req, ret); in io_submit_sqe()
7064 ret = io_req_prep(req, sqe); in io_submit_sqe()
7070 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, in io_submit_sqe()
7071 req->flags, true, in io_submit_sqe()
7084 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7085 ret = io_req_prep_async(req); in io_submit_sqe()
7087 req_fail_link_node(req, ret); in io_submit_sqe()
7092 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
7093 link->last->link = req; in io_submit_sqe()
7094 link->last = req; in io_submit_sqe()
7097 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7102 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7103 link->head = req; in io_submit_sqe()
7104 link->last = req; in io_submit_sqe()
7106 io_queue_sqe(req); in io_submit_sqe()
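
The tail of io_submit_sqe() is what builds link chains: an SQE carrying IOSQE_IO_LINK (or IOSQE_IO_HARDLINK) becomes link->head/link->last, later SQEs are appended through link->last->link, and the chain is queued once an SQE without the flag closes it; failures propagate through req_fail_link_node() so later members complete with -ECANCELED. A short sketch of a three-step chain:

    #include <liburing.h>

    /* write -> fsync -> close, executed strictly in order; if the write fails,
     * the fsync and close complete with -ECANCELED (req_fail_link_node()). */
    static void chained_write_sync_close(struct io_uring *ring, int fd,
                                         const void *buf, unsigned len)
    {
            struct io_uring_sqe *sqe;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_write(sqe, fd, buf, len, 0);
            sqe->flags |= IOSQE_IO_LINK;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_fsync(sqe, fd, 0);
            sqe->flags |= IOSQE_IO_LINK;

            sqe = io_uring_get_sqe(ring);
            io_uring_prep_close(sqe, fd);      /* no link flag: closes the chain */

            io_uring_submit(ring);
    }
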
7197 struct io_kiocb *req; in io_submit_sqes() local
7199 req = io_alloc_req(ctx); in io_submit_sqes()
7200 if (unlikely(!req)) { in io_submit_sqes()
7207 list_add(&req->inflight_entry, &ctx->submit_state.free_list); in io_submit_sqes()
7212 if (io_submit_sqe(ctx, req, sqe)) in io_submit_sqes()
8303 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, in io_install_fixed_file() argument
8306 struct io_ring_ctx *ctx = req->ctx; in io_install_fixed_file()
8359 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_close_fixed() argument
8361 unsigned int offset = req->close.file_slot - 1; in io_close_fixed()
8362 struct io_ring_ctx *ctx = req->ctx; in io_close_fixed()
9174 struct io_kiocb *req, *nxt; in io_req_cache_free() local
9176 list_for_each_entry_safe(req, nxt, list, inflight_entry) { in io_req_cache_free()
9177 list_del(&req->inflight_entry); in io_req_cache_free()
9178 kmem_cache_free(req_cachep, req); in io_req_cache_free()
9332 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_ctx_cb() local
9334 return req->ctx == data; in io_cancel_ctx_cb()
9409 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
9414 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
9415 if (io_match_task(req, tsk, cancel_all)) { in io_kill_timeouts()
9416 io_kill_timeout(req, -ECANCELED); in io_kill_timeouts()
9474 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
9477 return io_match_task_safe(req, cancel->task, cancel->all); in io_cancel_task_cb()
9488 if (io_match_task_safe(de->req, task, cancel_all)) { in io_cancel_defer_files()
9500 io_req_complete_failed(de->req, -ECANCELED); in io_cancel_defer_files()
10053 struct io_kiocb *req; in __io_uring_show_fdinfo() local
10055 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
10056 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
10057 req->task->task_works != NULL); in __io_uring_show_fdinfo()