Lines matching refs:tctx, i.e. references to the per-task io_uring context (struct io_uring_task *tctx). Each entry gives the source line number, the matching line, the enclosing function, and whether tctx is a local variable or an argument there.
1480 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work() local
1485 BUG_ON(!tctx); in io_queue_async_work()
1486 BUG_ON(!tctx->io_wq); in io_queue_async_work()
1503 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1729 struct io_uring_task *tctx = task->io_uring; in io_put_task() local
1732 tctx->cached_refs += nr; in io_put_task()
1734 percpu_counter_sub(&tctx->inflight, nr); in io_put_task()
1735 if (unlikely(atomic_read(&tctx->in_idle))) in io_put_task()
1736 wake_up(&tctx->wait); in io_put_task()
1741 static void io_task_refs_refill(struct io_uring_task *tctx) in io_task_refs_refill() argument
1743 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; in io_task_refs_refill()
1745 percpu_counter_add(&tctx->inflight, refill); in io_task_refs_refill()
1747 tctx->cached_refs += refill; in io_task_refs_refill()
1752 struct io_uring_task *tctx = current->io_uring; in io_get_task_refs() local
1754 tctx->cached_refs -= nr; in io_get_task_refs()
1755 if (unlikely(tctx->cached_refs < 0)) in io_get_task_refs()
1756 io_task_refs_refill(tctx); in io_get_task_refs()
1761 struct io_uring_task *tctx = task->io_uring; in io_uring_drop_tctx_refs() local
1762 unsigned int refs = tctx->cached_refs; in io_uring_drop_tctx_refs()
1765 tctx->cached_refs = 0; in io_uring_drop_tctx_refs()
1766 percpu_counter_sub(&tctx->inflight, refs); in io_uring_drop_tctx_refs()
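
Taken together, the references above describe a per-task reference cache in front of the shared inflight counter: io_get_task_refs() takes refs from tctx->cached_refs and only calls io_task_refs_refill() when the cache goes negative (1754-1756), the refill charges the percpu counter once for a whole batch (1743-1747), io_put_task() gives refs back to the cache when called from the owning task and subtracts from the percpu counter otherwise, waking tctx->wait if the task is idling (1732-1736), and io_uring_drop_tctx_refs() returns whatever is still cached at teardown (1762-1766). The standalone sketch below models that batching with plain integers; batch_cache, REFILL_BATCH and the function names are invented for the example, and the percpu counter of the real code is reduced to a single long.

#include <stdio.h>

/* Illustrative stand-ins; not the kernel's types or constants. */
#define REFILL_BATCH 128              /* plays the role of IO_TCTX_REFS_CACHE_NR */

static long shared_inflight;          /* plays the role of the percpu tctx->inflight */

struct batch_cache {
	long cached;                  /* plays the role of tctx->cached_refs */
};

/* Called when the cache has gone negative: charge the shared counter once
 * for a whole batch instead of once per request. */
static void cache_refill(struct batch_cache *c)
{
	long refill = -c->cached + REFILL_BATCH;

	shared_inflight += refill;
	c->cached += refill;
}

/* Submission side: take nr refs, refilling only on underflow. */
static void cache_get(struct batch_cache *c, long nr)
{
	c->cached -= nr;
	if (c->cached < 0)
		cache_refill(c);
}

/* Completion side, owning task: give refs back to the local cache. */
static void cache_put_local(struct batch_cache *c, long nr)
{
	c->cached += nr;
}

/* Teardown: return whatever is still cached to the shared counter. */
static void cache_drop(struct batch_cache *c)
{
	shared_inflight -= c->cached;
	c->cached = 0;
}

int main(void)
{
	struct batch_cache c = { 0 };

	cache_get(&c, 8);             /* first get triggers one batched refill */
	cache_put_local(&c, 8);
	cache_drop(&c);
	printf("shared_inflight=%ld cached=%ld\n", shared_inflight, c.cached);
	return 0;
}

The effect is that the shared counter is adjusted once per batch of requests rather than once per request; the program prints shared_inflight=0 cached=0, i.e. the gets, puts and the final drop balance out.
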
2176 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work); in tctx_task_work() local
2182 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) in tctx_task_work()
2185 spin_lock_irq(&tctx->task_lock); in tctx_task_work()
2186 node = tctx->task_list.first; in tctx_task_work()
2187 INIT_WQ_LIST(&tctx->task_list); in tctx_task_work()
2189 tctx->task_running = false; in tctx_task_work()
2190 spin_unlock_irq(&tctx->task_lock); in tctx_task_work()
2216 if (unlikely(atomic_read(&tctx->in_idle))) in tctx_task_work()
2223 struct io_uring_task *tctx = tsk->io_uring; in io_req_task_work_add() local
2229 WARN_ON_ONCE(!tctx); in io_req_task_work_add()
2231 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2232 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2233 running = tctx->task_running; in io_req_task_work_add()
2235 tctx->task_running = true; in io_req_task_work_add()
2236 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
2249 if (!task_work_add(tsk, &tctx->task_work, notify)) { in io_req_task_work_add()
2254 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2255 tctx->task_running = false; in io_req_task_work_add()
2256 node = tctx->task_list.first; in io_req_task_work_add()
2257 INIT_WQ_LIST(&tctx->task_list); in io_req_task_work_add()
2258 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
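
io_req_task_work_add() and tctx_task_work() form a batched hand-off: producers append the request to tctx->task_list under task_lock and remember via task_running whether a run is already pending (2231-2236), calling task_work_add() only when it was not (2249); the consumer splices the entire list out under the lock, re-initialises it, clears task_running once the list comes back empty, and walks the detached nodes without holding the lock (2185-2190). The failure path at 2254-2258 re-splices the list the same way to cancel it. The standalone model below reproduces that splice-and-run shape single-threaded; work_node, work_list, task_ctx and the kick/run naming are invented, and the spinlock is reduced to lock/unlock comments.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for io_wq_work_node / io_wq_work_list; all names
 * here are invented for the example. */
struct work_node {
	struct work_node *next;
	int id;
};

struct work_list {
	struct work_node *first;
	struct work_node **last;	/* points at the next link to fill */
};

struct task_ctx {
	struct work_list list;		/* plays the role of tctx->task_list */
	int running;			/* plays the role of tctx->task_running */
	/* a real version also carries the task_lock spinlock */
};

static void list_init(struct work_list *l)
{
	l->first = NULL;
	l->last = &l->first;
}

/* Producer: queue a node under the lock and report whether the consumer
 * needs a kick (the point where the kernel calls task_work_add()). */
static int work_add(struct task_ctx *t, struct work_node *node)
{
	int was_running;

	/* lock(task_lock) */
	node->next = NULL;
	*t->list.last = node;
	t->list.last = &node->next;
	was_running = t->running;
	t->running = 1;
	/* unlock(task_lock) */

	return !was_running;
}

/* Consumer: splice the whole pending list out under the lock, then run
 * the detached nodes unlocked; stop once a splice comes back empty. */
static void work_run(struct task_ctx *t)
{
	for (;;) {
		struct work_node *node;

		/* lock(task_lock) */
		node = t->list.first;
		list_init(&t->list);
		if (!node)
			t->running = 0;
		/* unlock(task_lock) */
		if (!node)
			break;

		while (node) {
			struct work_node *next = node->next;

			printf("running work %d\n", node->id);
			node = next;
		}
	}
}

int main(void)
{
	struct work_node a = { .id = 1 }, b = { .id = 2 };
	struct task_ctx t = { .running = 0 };

	list_init(&t.list);
	if (work_add(&t, &a))		/* first add: kick the consumer */
		printf("kick\n");
	if (work_add(&t, &b))		/* second add rides the pending run */
		printf("kick\n");
	work_run(&t);			/* splices {1, 2}, runs them, then stops */
	return 0;
}

The design keeps the lock hold time to a couple of pointer updates on both sides: callbacks never run under task_lock, and a burst of additions results in a single notification rather than one per request.
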
6177 static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data, struct io_ring_ctx *ctx) in io_async_cancel_one() argument
6184 if (!tctx || !tctx->io_wq) in io_async_cancel_one()
6187 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false); in io_async_cancel_one()
6256 struct io_uring_task *tctx = node->task->io_uring; in io_async_cancel() local
6258 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
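
io_async_cancel_one() bails out if the target task has no tctx or no io-wq (6184) and otherwise asks the io-wq to cancel by matching callback (6187). What the matched lines do not show is how cancel_ret becomes the syscall-visible result; the switch below is the conventional mapping for the io-wq IO_WQ_CANCEL_* results and is written here as an assumption, not as part of the excerpt.

	/* Assumed follow-up to the io_wq_cancel_cb() call above; this mapping
	 * is not part of the matched lines. */
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:		/* pulled off the io-wq before it ran */
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:	/* already executing in a worker */
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:	/* not queued in this task's io-wq */
		ret = -ENOENT;
		break;
	}
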
6555 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op() local
6557 atomic_dec(&tctx->inflight_tracked); in io_clean_op()
8511 struct io_uring_task *tctx; in io_uring_alloc_task_context() local
8514 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); in io_uring_alloc_task_context()
8515 if (unlikely(!tctx)) in io_uring_alloc_task_context()
8518 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
8520 kfree(tctx); in io_uring_alloc_task_context()
8524 tctx->io_wq = io_init_wq_offload(ctx, task); in io_uring_alloc_task_context()
8525 if (IS_ERR(tctx->io_wq)) { in io_uring_alloc_task_context()
8526 ret = PTR_ERR(tctx->io_wq); in io_uring_alloc_task_context()
8527 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
8528 kfree(tctx); in io_uring_alloc_task_context()
8532 xa_init(&tctx->xa); in io_uring_alloc_task_context()
8533 init_waitqueue_head(&tctx->wait); in io_uring_alloc_task_context()
8534 atomic_set(&tctx->in_idle, 0); in io_uring_alloc_task_context()
8535 atomic_set(&tctx->inflight_tracked, 0); in io_uring_alloc_task_context()
8536 task->io_uring = tctx; in io_uring_alloc_task_context()
8537 spin_lock_init(&tctx->task_lock); in io_uring_alloc_task_context()
8538 INIT_WQ_LIST(&tctx->task_list); in io_uring_alloc_task_context()
8539 init_task_work(&tctx->task_work, tctx_task_work); in io_uring_alloc_task_context()
8545 struct io_uring_task *tctx = tsk->io_uring; in __io_uring_free() local
8547 WARN_ON_ONCE(!xa_empty(&tctx->xa)); in __io_uring_free()
8548 WARN_ON_ONCE(tctx->io_wq); in __io_uring_free()
8549 WARN_ON_ONCE(tctx->cached_refs); in __io_uring_free()
8551 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
8552 kfree(tctx); in __io_uring_free()
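
The allocation path at 8511-8539 and the free path at 8545-8552 bracket the lifetime of the per-task context. The sketch below stitches the listed allocation lines into one function so the ordering is visible in one place; the function name is made up, and the return values on the two failure branches are filled in as assumptions since they are not part of the matched lines.

/* Condensed from the lines above; error handling is simplified. */
static int alloc_task_context_sketch(struct task_struct *task,
				     struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (ret) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

The free side is the mirror image: the WARN_ON_ONCE() checks at 8547-8549 expect the xarray to already be empty, io_wq to have been torn down (io_uring_clean_tctx() clears it at 9670) and cached_refs to have been returned (io_uring_drop_tctx_refs() at 1761-1766), leaving only percpu_counter_destroy() and kfree().
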
9315 struct io_uring_task *tctx = current->io_uring; in io_tctx_exit_cb() local
9325 if (tctx && !atomic_read(&tctx->in_idle)) in io_tctx_exit_cb()
9514 struct io_uring_task *tctx = node->task->io_uring; in io_uring_try_cancel_iowq() local
9520 if (!tctx || !tctx->io_wq) in io_uring_try_cancel_iowq()
9522 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); in io_uring_try_cancel_iowq()
9535 struct io_uring_task *tctx = task ? task->io_uring : NULL; in io_uring_try_cancel_requests() local
9543 } else if (tctx && tctx->io_wq) { in io_uring_try_cancel_requests()
9548 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, in io_uring_try_cancel_requests()
9575 struct io_uring_task *tctx = current->io_uring; in __io_uring_add_tctx_node() local
9579 if (unlikely(!tctx)) { in __io_uring_add_tctx_node()
9584 tctx = current->io_uring; in __io_uring_add_tctx_node()
9589 ret = io_wq_max_workers(tctx->io_wq, limits); in __io_uring_add_tctx_node()
9594 if (!xa_load(&tctx->xa, (unsigned long)ctx)) { in __io_uring_add_tctx_node()
9601 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, in __io_uring_add_tctx_node()
9612 tctx->last = ctx; in __io_uring_add_tctx_node()
9621 struct io_uring_task *tctx = current->io_uring; in io_uring_add_tctx_node() local
9623 if (likely(tctx && tctx->last == ctx)) in io_uring_add_tctx_node()
9633 struct io_uring_task *tctx = current->io_uring; in io_uring_del_tctx_node() local
9636 if (!tctx) in io_uring_del_tctx_node()
9638 node = xa_erase(&tctx->xa, index); in io_uring_del_tctx_node()
9649 if (tctx->last == node->ctx) in io_uring_del_tctx_node()
9650 tctx->last = NULL; in io_uring_del_tctx_node()
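
io_uring_add_tctx_node() (9621) first checks a one-entry cache: tctx->last remembers the ring this task last submitted to, so repeat submissions to the same ring skip the xarray lookup; a miss falls through to __io_uring_add_tctx_node(), which registers a node keyed by the ctx pointer (xa_load/xa_store at 9594 and 9601) and updates tctx->last (9612). io_uring_del_tctx_node() erases by the same key and clears the cache if it pointed at the removed ring (9649-9650). The standalone model below reproduces that shape with a plain array in place of the xarray; struct task_registry and its helpers are invented for the example.

#include <stdio.h>
#include <stddef.h>

#define MAX_RINGS 16

/* Toy stand-in for the tctx->xa registry plus the tctx->last cache. */
struct task_registry {
	const void *rings[MAX_RINGS];	/* registered ring pointers */
	int nr;
	const void *last;		/* most recently used ring */
};

static int registry_has(struct task_registry *t, const void *ctx)
{
	for (int i = 0; i < t->nr; i++)
		if (t->rings[i] == ctx)
			return 1;
	return 0;
}

/* Mirrors io_uring_add_tctx_node(): hit the one-entry cache first,
 * register the ring on a miss, then remember it as the last one used. */
static int registry_add(struct task_registry *t, const void *ctx)
{
	if (t->last == ctx)
		return 0;
	if (!registry_has(t, ctx)) {
		if (t->nr == MAX_RINGS)
			return -1;
		t->rings[t->nr++] = ctx;
	}
	t->last = ctx;
	return 0;
}

/* Mirrors io_uring_del_tctx_node(): drop the ring and invalidate the
 * cache if it pointed at the ring being removed. */
static void registry_del(struct task_registry *t, const void *ctx)
{
	for (int i = 0; i < t->nr; i++) {
		if (t->rings[i] == ctx) {
			t->rings[i] = t->rings[--t->nr];
			break;
		}
	}
	if (t->last == ctx)
		t->last = NULL;
}

int main(void)
{
	struct task_registry t = { .nr = 0, .last = NULL };
	int a, b;

	registry_add(&t, &a);
	registry_add(&t, &a);		/* fast path: cache hit, no lookup */
	registry_add(&t, &b);
	registry_del(&t, &b);
	printf("registered=%d last_cached=%d\n", t.nr, t.last != NULL);
	return 0;
}

Keeping the most recent ring in tctx->last lets the common single-ring submitter skip both the lookup and the registration check entirely, which is exactly the likely() fast path at 9623.
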
9654 static void io_uring_clean_tctx(struct io_uring_task *tctx) in io_uring_clean_tctx() argument
9656 struct io_wq *wq = tctx->io_wq; in io_uring_clean_tctx()
9660 xa_for_each(&tctx->xa, index, node) { in io_uring_clean_tctx()
9670 tctx->io_wq = NULL; in io_uring_clean_tctx()
9674 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) in tctx_inflight() argument
9677 return atomic_read(&tctx->inflight_tracked); in tctx_inflight()
9678 return percpu_counter_sum(&tctx->inflight); in tctx_inflight()
9687 struct io_uring_task *tctx = current->io_uring; in io_uring_cancel_generic() local
9696 if (tctx->io_wq) in io_uring_cancel_generic()
9697 io_wq_exit_start(tctx->io_wq); in io_uring_cancel_generic()
9699 atomic_inc(&tctx->in_idle); in io_uring_cancel_generic()
9703 inflight = tctx_inflight(tctx, !cancel_all); in io_uring_cancel_generic()
9711 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
9724 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); in io_uring_cancel_generic()
9733 if (inflight == tctx_inflight(tctx, !cancel_all)) in io_uring_cancel_generic()
9735 finish_wait(&tctx->wait, &wait); in io_uring_cancel_generic()
9738 io_uring_clean_tctx(tctx); in io_uring_cancel_generic()
9744 atomic_dec(&tctx->in_idle); in io_uring_cancel_generic()
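
io_uring_cancel_generic() (9687) drains a task's requests with a snapshot-and-recheck wait: it marks the tctx idle (9699), snapshots tctx_inflight() (9703, which counts only tracked requests unless cancel_all, per 9674-9678), pushes cancellations for every registered ring (the xa_for_each at 9711), and then only sleeps if the inflight count still matches the snapshot after prepare_to_wait() (9724, 9733), so a completion that races in and wakes tctx->wait via io_put_task() (1735-1736) is never lost. The fragment below condenses that loop from the listed lines; the per-ring cancellation body and the exit handling around io_uring_clean_tctx() (9738) and the final atomic_dec (9744) are elided or simplified.

	/* Condensed from the lines above; the cancellation work inside the
	 * loop is elided. */
	DEFINE_WAIT(wait);
	s64 inflight;

	atomic_inc(&tctx->in_idle);
	do {
		inflight = tctx_inflight(tctx, !cancel_all);	/* snapshot */
		if (!inflight)
			break;

		/* ... push cancellations for each ring in tctx->xa ... */

		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
		/*
		 * Sleep only if nothing completed since the snapshot: if a
		 * completion slipped in before prepare_to_wait(), the recheck
		 * fails and the loop retries instead of blocking on a stale
		 * count.
		 */
		if (inflight == tctx_inflight(tctx, !cancel_all))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);

	io_uring_clean_tctx(tctx);		/* 9738: drop io-wq and ctx nodes */
	atomic_dec(&tctx->in_idle);		/* 9744 */
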
10557 struct io_uring_task *tctx = current->io_uring; in io_register_iowq_aff() local
10561 if (!tctx || !tctx->io_wq) in io_register_iowq_aff()
10588 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask); in io_register_iowq_aff()
10595 struct io_uring_task *tctx = current->io_uring; in io_unregister_iowq_aff() local
10597 if (!tctx || !tctx->io_wq) in io_unregister_iowq_aff()
10600 return io_wq_cpu_affinity(tctx->io_wq, NULL); in io_unregister_iowq_aff()
10608 struct io_uring_task *tctx = NULL; in io_register_iowq_max_workers() local
10632 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
10635 tctx = current->io_uring; in io_register_iowq_max_workers()
10646 if (tctx && tctx->io_wq) { in io_register_iowq_max_workers()
10647 ret = io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
10668 struct io_uring_task *tctx = node->task->io_uring; in io_register_iowq_max_workers() local
10670 if (WARN_ON_ONCE(!tctx->io_wq)) in io_register_iowq_max_workers()
10676 (void)io_wq_max_workers(tctx->io_wq, new_count); in io_register_iowq_max_workers()
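
io_register_iowq_max_workers() has to decide whose io-wq the new limits apply to: with SQPOLL the submission happens from the sq thread, so its tctx is used (10632), otherwise current->io_uring (10635); the limits are applied to that io-wq (10646-10647) and, in the second block at 10668-10676, propagated to the io-wq of every other task registered with the ring, ignoring the per-task return value. The sketch below condenses that flow; the sqd test, the list walk and its member names are written from the surrounding context and are assumptions rather than matched lines.

	/* Condensed sketch; locking and reporting the old values back to
	 * userspace are elided, and the list/member names are assumed. */
	if (sqd)				/* SQPOLL: the sq thread owns the io-wq */
		tctx = sqd->thread->io_uring;
	else
		tctx = current->io_uring;

	if (tctx && tctx->io_wq)
		ret = io_wq_max_workers(tctx->io_wq, new_count);

	/* propagate the same limits to every other task attached to the ring */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
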