Lines Matching refs:sqd

1088 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
7231 static inline bool io_sqd_events_pending(struct io_sq_data *sqd) in io_sqd_events_pending() argument
7233 return READ_ONCE(sqd->state); in io_sqd_events_pending()
7292 static void io_sqd_update_thread_idle(struct io_sq_data *sqd) in io_sqd_update_thread_idle() argument
7297 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sqd_update_thread_idle()
7299 sqd->sq_thread_idle = sq_thread_idle; in io_sqd_update_thread_idle()
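The fragments at 7292-7299 drop the loop body; read together, the helper recomputes the shared idle time as the maximum over all attached rings. A minimal sketch of what those lines imply (the max() over ctx->sq_thread_idle is inferred from the surrounding references, not quoted source):

        static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
        {
                struct io_ring_ctx *ctx;
                unsigned sq_thread_idle = 0;

                /* the shared SQPOLL thread idles for the longest period any ring asked for */
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                        sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
                sqd->sq_thread_idle = sq_thread_idle;
        }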
7302 static bool io_sqd_handle_event(struct io_sq_data *sqd) in io_sqd_handle_event() argument
7307 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || in io_sqd_handle_event()
7309 mutex_unlock(&sqd->lock); in io_sqd_handle_event()
7313 mutex_lock(&sqd->lock); in io_sqd_handle_event()
7315 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sqd_handle_event()
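Taken together, the fields dereferenced throughout this listing (refs, park_pending, lock, ctx_list, thread, wait, sq_thread_idle, sq_cpu, task_pid, task_tgid, state, exited) outline the shape of struct io_sq_data. A sketch assembled from those references only; exact types and member ordering are assumptions:

        struct io_sq_data {
                refcount_t              refs;           /* dropped in io_put_sq_data() */
                atomic_t                park_pending;   /* nested park requests */
                struct mutex            lock;           /* serialises ctx_list/thread access */
                struct list_head        ctx_list;       /* rings sharing this SQPOLL thread */
                struct task_struct      *thread;        /* iou-sqp-<pid>, NULL once it exits */
                struct wait_queue_head  wait;           /* SQPOLL thread idles here */
                unsigned                sq_thread_idle; /* refreshed by io_sqd_update_thread_idle() */
                int                     sq_cpu;         /* CPU affinity, -1 for none */
                pid_t                   task_pid;
                pid_t                   task_tgid;      /* checked by io_attach_sq_data() */
                unsigned long           state;          /* SHOULD_PARK / SHOULD_STOP bits */
                struct completion       exited;         /* completed at the tail of io_sq_thread() */
        };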
7320 struct io_sq_data *sqd = data; in io_sq_thread() local
7326 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); in io_sq_thread()
7329 if (sqd->sq_cpu != -1) in io_sq_thread()
7330 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); in io_sq_thread()
7335 mutex_lock(&sqd->lock); in io_sq_thread()
7339 if (io_sqd_events_pending(sqd) || signal_pending(current)) { in io_sq_thread()
7340 if (io_sqd_handle_event(sqd)) in io_sq_thread()
7342 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7345 cap_entries = !list_is_singular(&sqd->ctx_list); in io_sq_thread()
7346 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7358 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7362 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); in io_sq_thread()
7363 if (!io_sqd_events_pending(sqd) && !current->task_works) { in io_sq_thread()
7366 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7381 mutex_unlock(&sqd->lock); in io_sq_thread()
7383 mutex_lock(&sqd->lock); in io_sq_thread()
7385 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7389 finish_wait(&sqd->wait, &wait); in io_sq_thread()
7390 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7393 io_uring_cancel_generic(true, sqd); in io_sq_thread()
7394 sqd->thread = NULL; in io_sq_thread()
7395 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7398 mutex_unlock(&sqd->lock); in io_sq_thread()
7400 complete(&sqd->exited); in io_sq_thread()
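Lines 7320-7400 are the SQPOLL thread itself. Stitching the fragments back together gives roughly the control flow below; this is a condensed sketch, and the submission helper (__io_sq_thread) plus the wakeup-flag helpers are inferred from context rather than quoted from the listing:

        static int io_sq_thread(void *data)
        {
                struct io_sq_data *sqd = data;
                struct io_ring_ctx *ctx;
                unsigned long timeout = 0;
                char buf[TASK_COMM_LEN];
                DEFINE_WAIT(wait);

                snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
                set_task_comm(current, buf);
                if (sqd->sq_cpu != -1)
                        set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));

                mutex_lock(&sqd->lock);
                while (1) {
                        bool cap_entries, sqt_spin = false;

                        if (io_sqd_events_pending(sqd) || signal_pending(current)) {
                                if (io_sqd_handle_event(sqd))
                                        break;                          /* SHOULD_STOP */
                                timeout = jiffies + sqd->sq_thread_idle;
                        }

                        /* cap per-ring submission when several rings share the thread */
                        cap_entries = !list_is_singular(&sqd->ctx_list);
                        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                sqt_spin |= __io_sq_thread(ctx, cap_entries) > 0;   /* inferred */

                        if (sqt_spin || !time_after(jiffies, timeout)) {
                                if (sqt_spin)
                                        timeout = jiffies + sqd->sq_thread_idle;
                                continue;
                        }

                        /* idle: tell userspace it must IORING_ENTER_SQ_WAKEUP, then sleep */
                        prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
                        if (!io_sqd_events_pending(sqd) && !current->task_works) {
                                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                        io_ring_set_wakeup_flag(ctx);           /* inferred */
                                mutex_unlock(&sqd->lock);
                                schedule();
                                mutex_lock(&sqd->lock);
                                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                        io_ring_clear_wakeup_flag(ctx);         /* inferred */
                        }
                        finish_wait(&sqd->wait, &wait);
                        timeout = jiffies + sqd->sq_thread_idle;
                }

                io_uring_cancel_generic(true, sqd);
                sqd->thread = NULL;
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                        io_ring_set_wakeup_flag(ctx);                           /* inferred */
                mutex_unlock(&sqd->lock);
                complete(&sqd->exited);
                do_exit(0);
        }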
7822 static void io_sq_thread_unpark(struct io_sq_data *sqd) in io_sq_thread_unpark() argument
7823 __releases(&sqd->lock) in io_sq_thread_unpark()
7825 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_unpark()
7831 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
7832 if (atomic_dec_return(&sqd->park_pending)) in io_sq_thread_unpark()
7833 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
7834 mutex_unlock(&sqd->lock); in io_sq_thread_unpark()
7837 static void io_sq_thread_park(struct io_sq_data *sqd) in io_sq_thread_park() argument
7838 __acquires(&sqd->lock) in io_sq_thread_park()
7840 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_park()
7842 atomic_inc(&sqd->park_pending); in io_sq_thread_park()
7843 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_park()
7844 mutex_lock(&sqd->lock); in io_sq_thread_park()
7845 if (sqd->thread) in io_sq_thread_park()
7846 wake_up_process(sqd->thread); in io_sq_thread_park()
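Lines 7822-7846 form the two halves of the park protocol: park bumps park_pending, sets IO_SQ_THREAD_SHOULD_PARK, takes sqd->lock and wakes the thread so it notices the bit; unpark clears the bit (re-setting it if other parkers are still pending) and drops the lock. Callers therefore hold sqd->lock between the two calls, which is the pattern io_sq_offload_create() and io_sq_thread_finish() use later in this listing:

        io_sq_thread_park(sqd);                         /* returns with sqd->lock held */
        list_add(&ctx->sqd_list, &sqd->ctx_list);       /* safe: SQPOLL thread is parked */
        io_sqd_update_thread_idle(sqd);
        io_sq_thread_unpark(sqd);                       /* drops sqd->lock, thread resumes */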
7849 static void io_sq_thread_stop(struct io_sq_data *sqd) in io_sq_thread_stop() argument
7851 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_stop()
7852 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); in io_sq_thread_stop()
7854 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sq_thread_stop()
7855 mutex_lock(&sqd->lock); in io_sq_thread_stop()
7856 if (sqd->thread) in io_sq_thread_stop()
7857 wake_up_process(sqd->thread); in io_sq_thread_stop()
7858 mutex_unlock(&sqd->lock); in io_sq_thread_stop()
7859 wait_for_completion(&sqd->exited); in io_sq_thread_stop()
7862 static void io_put_sq_data(struct io_sq_data *sqd) in io_put_sq_data() argument
7864 if (refcount_dec_and_test(&sqd->refs)) { in io_put_sq_data()
7865 WARN_ON_ONCE(atomic_read(&sqd->park_pending)); in io_put_sq_data()
7867 io_sq_thread_stop(sqd); in io_put_sq_data()
7868 kfree(sqd); in io_put_sq_data()
7874 struct io_sq_data *sqd = ctx->sq_data; in io_sq_thread_finish() local
7876 if (sqd) { in io_sq_thread_finish()
7877 io_sq_thread_park(sqd); in io_sq_thread_finish()
7879 io_sqd_update_thread_idle(sqd); in io_sq_thread_finish()
7880 io_sq_thread_unpark(sqd); in io_sq_thread_finish()
7882 io_put_sq_data(sqd); in io_sq_thread_finish()
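The io_sq_thread_finish() fragments at 7874-7882 are the detach side of the same pattern: park, drop this ring from the shared list, refresh the idle time, unpark, then release the reference. A sketch; the list_del and the final ctx->sq_data reset are assumptions, only the calls shown above appear in the listing:

        static void io_sq_thread_finish(struct io_ring_ctx *ctx)
        {
                struct io_sq_data *sqd = ctx->sq_data;

                if (sqd) {
                        io_sq_thread_park(sqd);
                        list_del_init(&ctx->sqd_list);          /* assumed */
                        io_sqd_update_thread_idle(sqd);
                        io_sq_thread_unpark(sqd);

                        io_put_sq_data(sqd);
                        ctx->sq_data = NULL;                    /* assumed */
                }
        }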
7890 struct io_sq_data *sqd; in io_attach_sq_data() local
7902 sqd = ctx_attach->sq_data; in io_attach_sq_data()
7903 if (!sqd) { in io_attach_sq_data()
7907 if (sqd->task_tgid != current->tgid) { in io_attach_sq_data()
7912 refcount_inc(&sqd->refs); in io_attach_sq_data()
7914 return sqd; in io_attach_sq_data()
7920 struct io_sq_data *sqd; in io_get_sq_data() local
7924 sqd = io_attach_sq_data(p); in io_get_sq_data()
7925 if (!IS_ERR(sqd)) { in io_get_sq_data()
7927 return sqd; in io_get_sq_data()
7930 if (PTR_ERR(sqd) != -EPERM) in io_get_sq_data()
7931 return sqd; in io_get_sq_data()
7934 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); in io_get_sq_data()
7935 if (!sqd) in io_get_sq_data()
7938 atomic_set(&sqd->park_pending, 0); in io_get_sq_data()
7939 refcount_set(&sqd->refs, 1); in io_get_sq_data()
7940 INIT_LIST_HEAD(&sqd->ctx_list); in io_get_sq_data()
7941 mutex_init(&sqd->lock); in io_get_sq_data()
7942 init_waitqueue_head(&sqd->wait); in io_get_sq_data()
7943 init_completion(&sqd->exited); in io_get_sq_data()
7944 return sqd; in io_get_sq_data()
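Lines 7890-7944 pair an attach helper with an allocate-or-attach wrapper: io_attach_sq_data() refuses to share an sqd across thread groups (the task_tgid check at 7907), and io_get_sq_data() falls back to a fresh allocation when attaching is not requested or fails with -EPERM. A condensed sketch; the IORING_SETUP_ATTACH_WQ test and the attached out-parameter are assumptions inferred from io_sq_offload_create() below:

        static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, bool *attached)
        {
                struct io_sq_data *sqd;

                *attached = false;
                if (p->flags & IORING_SETUP_ATTACH_WQ) {        /* assumed */
                        sqd = io_attach_sq_data(p);
                        if (!IS_ERR(sqd)) {
                                *attached = true;
                                return sqd;
                        }
                        /* only an -EPERM (foreign tgid) attach failure falls through */
                        if (PTR_ERR(sqd) != -EPERM)
                                return sqd;
                }

                sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
                if (!sqd)
                        return ERR_PTR(-ENOMEM);

                atomic_set(&sqd->park_pending, 0);
                refcount_set(&sqd->refs, 1);
                INIT_LIST_HEAD(&sqd->ctx_list);
                mutex_init(&sqd->lock);
                init_waitqueue_head(&sqd->wait);
                init_completion(&sqd->exited);
                return sqd;
        }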
8577 struct io_sq_data *sqd; in io_sq_offload_create() local
8580 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
8581 if (IS_ERR(sqd)) { in io_sq_offload_create()
8582 ret = PTR_ERR(sqd); in io_sq_offload_create()
8587 ctx->sq_data = sqd; in io_sq_offload_create()
8592 io_sq_thread_park(sqd); in io_sq_offload_create()
8593 list_add(&ctx->sqd_list, &sqd->ctx_list); in io_sq_offload_create()
8594 io_sqd_update_thread_idle(sqd); in io_sq_offload_create()
8596 ret = (attached && !sqd->thread) ? -ENXIO : 0; in io_sq_offload_create()
8597 io_sq_thread_unpark(sqd); in io_sq_offload_create()
8610 sqd->sq_cpu = cpu; in io_sq_offload_create()
8612 sqd->sq_cpu = -1; in io_sq_offload_create()
8615 sqd->task_pid = current->pid; in io_sq_offload_create()
8616 sqd->task_tgid = current->tgid; in io_sq_offload_create()
8617 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); in io_sq_offload_create()
8623 sqd->thread = tsk; in io_sq_offload_create()
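Lines 8577-8623 combine the pieces for IORING_SETUP_SQPOLL setup: obtain an sqd, register the ring under the park protocol, then spawn the iou-sqp task if this ring did not attach to an existing one. A condensed sketch of that flow; the flag checks, error unwinding and wake_up_new_task() call are assumptions, only the sqd-touching statements appear above:

        sqd = io_get_sq_data(p, &attached);
        if (IS_ERR(sqd))
                return PTR_ERR(sqd);
        ctx->sq_data = sqd;

        io_sq_thread_park(sqd);
        list_add(&ctx->sqd_list, &sqd->ctx_list);
        io_sqd_update_thread_idle(sqd);
        /* attaching to an sqd whose thread already died cannot work */
        ret = (attached && !sqd->thread) ? -ENXIO : 0;
        io_sq_thread_unpark(sqd);
        if (ret < 0 || attached)
                return ret;                             /* assumed: attached rings reuse the thread */

        sqd->sq_cpu = (p->flags & IORING_SETUP_SQ_AFF) ? p->sq_thread_cpu : -1;    /* assumed */
        sqd->task_pid = current->pid;
        sqd->task_tgid = current->tgid;
        tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);                    /* assumed: real code unwinds via goto */
        sqd->thread = tsk;
        wake_up_new_task(tsk);                          /* assumed */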
9355 struct io_sq_data *sqd = ctx->sq_data; in io_ring_exit_work() local
9358 io_sq_thread_park(sqd); in io_ring_exit_work()
9359 tsk = sqd->thread; in io_ring_exit_work()
9363 io_sq_thread_unpark(sqd); in io_ring_exit_work()
9685 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) in io_uring_cancel_generic() argument
9692 WARN_ON_ONCE(sqd && sqd->thread != current); in io_uring_cancel_generic()
9707 if (!sqd) { in io_uring_cancel_generic()
9719 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_uring_cancel_generic()
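With a non-NULL sqd argument the cancellation helper is running on the SQPOLL thread itself (the WARN at 9692 enforces that, and io_sq_thread() calls it that way at 7393), so the loop at 9719 walks sqd->ctx_list rather than the calling task's own ring table (the !sqd branch at 9707). A sketch of that branch; io_uring_try_cancel_requests() is inferred from context, not quoted:

        if (!sqd) {
                /* cancel against every ring this task has registered in its tctx */
        } else {
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                        io_uring_try_cancel_requests(ctx, current, cancel_all);    /* inferred */
        }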
10609 struct io_sq_data *sqd = NULL; in io_register_iowq_max_workers() local
10620 sqd = ctx->sq_data; in io_register_iowq_max_workers()
10621 if (sqd) { in io_register_iowq_max_workers()
10627 refcount_inc(&sqd->refs); in io_register_iowq_max_workers()
10629 mutex_lock(&sqd->lock); in io_register_iowq_max_workers()
10631 if (sqd->thread) in io_register_iowq_max_workers()
10632 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
10654 if (sqd) { in io_register_iowq_max_workers()
10655 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
10656 io_put_sq_data(sqd); in io_register_iowq_max_workers()
10663 if (sqd) in io_register_iowq_max_workers()
10680 if (sqd) { in io_register_iowq_max_workers()
10681 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
10682 io_put_sq_data(sqd); in io_register_iowq_max_workers()
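The io_register_iowq_max_workers() fragments at 10609-10682 show the locking discipline for touching a live sqd from a registration syscall: take an extra reference and sqd->lock before dereferencing sqd->thread (so the worker-count update targets the SQPOLL thread's io_uring task context rather than the caller's), then release both on every exit path. A sketch of that pattern; the SQPOLL flag test and the placeholder comment are assumptions:

        if (ctx->flags & IORING_SETUP_SQPOLL) {         /* assumed */
                sqd = ctx->sq_data;
                if (sqd) {
                        refcount_inc(&sqd->refs);       /* keep sqd alive while we hold its lock */
                        mutex_lock(&sqd->lock);
                        if (sqd->thread)
                                tctx = sqd->thread->io_uring;
                }
        }

        /* ... apply the new bounded/unbounded worker counts to tctx ... */

        if (sqd) {
                mutex_unlock(&sqd->lock);
                io_put_sq_data(sqd);
        }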