Lines Matching refs:sqd

1087 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
7602 static inline bool io_sqd_events_pending(struct io_sq_data *sqd) in io_sqd_events_pending() argument
7604 return READ_ONCE(sqd->state); in io_sqd_events_pending()
7663 static void io_sqd_update_thread_idle(struct io_sq_data *sqd) in io_sqd_update_thread_idle() argument
7668 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sqd_update_thread_idle()
7670 sqd->sq_thread_idle = sq_thread_idle; in io_sqd_update_thread_idle()
7673 static bool io_sqd_handle_event(struct io_sq_data *sqd) in io_sqd_handle_event() argument
7678 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || in io_sqd_handle_event()
7680 mutex_unlock(&sqd->lock); in io_sqd_handle_event()
7684 mutex_lock(&sqd->lock); in io_sqd_handle_event()
7686 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sqd_handle_event()
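io_sqd_events_pending() and io_sqd_handle_event() are the thread-side half of the park/stop protocol: when IO_SQ_THREAD_SHOULD_PARK is set (or a signal is pending) the SQPOLL thread drops sqd->lock so the task blocked in io_sq_thread_park() can do its work, then retakes the lock and reports whether IO_SQ_THREAD_SHOULD_STOP asks it to exit. The hits above only show the framing lines; the sketch below fills in the dropped-lock middle from context, so treat the get_signal()/cond_resched() part as an assumption rather than verbatim source:

/*
 * Hedged reconstruction of io_sqd_handle_event(); only the framing
 * lines appear in the hits above. Returns true when the SQPOLL
 * thread should exit its main loop.
 */
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);	/* let the parked caller run */
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}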
7691 struct io_sq_data *sqd = data; in io_sq_thread() local
7697 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); in io_sq_thread()
7700 if (sqd->sq_cpu != -1) in io_sq_thread()
7701 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); in io_sq_thread()
7706 mutex_lock(&sqd->lock); in io_sq_thread()
7710 if (io_sqd_events_pending(sqd) || signal_pending(current)) { in io_sq_thread()
7711 if (io_sqd_handle_event(sqd)) in io_sq_thread()
7713 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7716 cap_entries = !list_is_singular(&sqd->ctx_list); in io_sq_thread()
7717 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7729 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7733 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); in io_sq_thread()
7734 if (!io_sqd_events_pending(sqd) && !current->task_works) { in io_sq_thread()
7737 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { in io_sq_thread()
7752 mutex_unlock(&sqd->lock); in io_sq_thread()
7754 mutex_lock(&sqd->lock); in io_sq_thread()
7756 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7760 finish_wait(&sqd->wait, &wait); in io_sq_thread()
7761 timeout = jiffies + sqd->sq_thread_idle; in io_sq_thread()
7764 io_uring_cancel_generic(true, sqd); in io_sq_thread()
7765 sqd->thread = NULL; in io_sq_thread()
7766 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_sq_thread()
7769 mutex_unlock(&sqd->lock); in io_sq_thread()
7771 complete(&sqd->exited); in io_sq_thread()
8212 static void io_sq_thread_unpark(struct io_sq_data *sqd) in io_sq_thread_unpark() argument
8213 __releases(&sqd->lock) in io_sq_thread_unpark()
8215 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_unpark()
8221 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
8222 if (atomic_dec_return(&sqd->park_pending)) in io_sq_thread_unpark()
8223 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_unpark()
8224 mutex_unlock(&sqd->lock); in io_sq_thread_unpark()
8227 static void io_sq_thread_park(struct io_sq_data *sqd) in io_sq_thread_park() argument
8228 __acquires(&sqd->lock) in io_sq_thread_park()
8230 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_park()
8232 atomic_inc(&sqd->park_pending); in io_sq_thread_park()
8233 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); in io_sq_thread_park()
8234 mutex_lock(&sqd->lock); in io_sq_thread_park()
8235 if (sqd->thread) in io_sq_thread_park()
8236 wake_up_process(sqd->thread); in io_sq_thread_park()
8239 static void io_sq_thread_stop(struct io_sq_data *sqd) in io_sq_thread_stop() argument
8241 WARN_ON_ONCE(sqd->thread == current); in io_sq_thread_stop()
8242 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); in io_sq_thread_stop()
8244 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); in io_sq_thread_stop()
8245 mutex_lock(&sqd->lock); in io_sq_thread_stop()
8246 if (sqd->thread) in io_sq_thread_stop()
8247 wake_up_process(sqd->thread); in io_sq_thread_stop()
8248 mutex_unlock(&sqd->lock); in io_sq_thread_stop()
8249 wait_for_completion(&sqd->exited); in io_sq_thread_stop()
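io_sq_thread_park() and io_sq_thread_unpark() give other tasks a way to quiesce the SQPOLL thread: park sets IO_SQ_THREAD_SHOULD_PARK, wakes the thread, and returns holding sqd->lock, so the caller can safely touch sqd->ctx_list and friends; park_pending makes this nestable, since unpark re-sets the bit while other parkers are still outstanding. io_sq_thread_stop() uses the same wakeup to make the thread exit and then waits on sqd->exited. The caller-side pattern, as used by io_sq_thread_finish() and io_sq_offload_create() further down, looks roughly like the sketch below; detach_ctx_from_sqd() is an illustrative name and the list_del_init() step is inferred rather than shown in the hits:

/*
 * Caller-side park/unpark pattern (cf. io_sq_thread_finish() below).
 */
static void detach_ctx_from_sqd(struct io_ring_ctx *ctx, struct io_sq_data *sqd)
{
	io_sq_thread_park(sqd);		/* SHOULD_PARK set, sqd->lock held  */
	list_del_init(&ctx->sqd_list);	/* SQ thread cannot race us here    */
	io_sqd_update_thread_idle(sqd);	/* recompute idle over the rest     */
	io_sq_thread_unpark(sqd);	/* clear SHOULD_PARK, drop the lock */
}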
8252 static void io_put_sq_data(struct io_sq_data *sqd) in io_put_sq_data() argument
8254 if (refcount_dec_and_test(&sqd->refs)) { in io_put_sq_data()
8255 WARN_ON_ONCE(atomic_read(&sqd->park_pending)); in io_put_sq_data()
8257 io_sq_thread_stop(sqd); in io_put_sq_data()
8258 kfree(sqd); in io_put_sq_data()
8264 struct io_sq_data *sqd = ctx->sq_data; in io_sq_thread_finish() local
8266 if (sqd) { in io_sq_thread_finish()
8267 io_sq_thread_park(sqd); in io_sq_thread_finish()
8269 io_sqd_update_thread_idle(sqd); in io_sq_thread_finish()
8270 io_sq_thread_unpark(sqd); in io_sq_thread_finish()
8272 io_put_sq_data(sqd); in io_sq_thread_finish()
8280 struct io_sq_data *sqd; in io_attach_sq_data() local
8292 sqd = ctx_attach->sq_data; in io_attach_sq_data()
8293 if (!sqd) { in io_attach_sq_data()
8297 if (sqd->task_tgid != current->tgid) { in io_attach_sq_data()
8302 refcount_inc(&sqd->refs); in io_attach_sq_data()
8304 return sqd; in io_attach_sq_data()
8310 struct io_sq_data *sqd; in io_get_sq_data() local
8314 sqd = io_attach_sq_data(p); in io_get_sq_data()
8315 if (!IS_ERR(sqd)) { in io_get_sq_data()
8317 return sqd; in io_get_sq_data()
8320 if (PTR_ERR(sqd) != -EPERM) in io_get_sq_data()
8321 return sqd; in io_get_sq_data()
8324 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); in io_get_sq_data()
8325 if (!sqd) in io_get_sq_data()
8328 atomic_set(&sqd->park_pending, 0); in io_get_sq_data()
8329 refcount_set(&sqd->refs, 1); in io_get_sq_data()
8330 INIT_LIST_HEAD(&sqd->ctx_list); in io_get_sq_data()
8331 mutex_init(&sqd->lock); in io_get_sq_data()
8332 init_waitqueue_head(&sqd->wait); in io_get_sq_data()
8333 init_completion(&sqd->exited); in io_get_sq_data()
8334 return sqd; in io_get_sq_data()
8912 struct io_sq_data *sqd; in io_sq_offload_create() local
8915 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
8916 if (IS_ERR(sqd)) { in io_sq_offload_create()
8917 ret = PTR_ERR(sqd); in io_sq_offload_create()
8922 ctx->sq_data = sqd; in io_sq_offload_create()
8927 io_sq_thread_park(sqd); in io_sq_offload_create()
8928 list_add(&ctx->sqd_list, &sqd->ctx_list); in io_sq_offload_create()
8929 io_sqd_update_thread_idle(sqd); in io_sq_offload_create()
8931 ret = (attached && !sqd->thread) ? -ENXIO : 0; in io_sq_offload_create()
8932 io_sq_thread_unpark(sqd); in io_sq_offload_create()
8945 sqd->sq_cpu = cpu; in io_sq_offload_create()
8947 sqd->sq_cpu = -1; in io_sq_offload_create()
8950 sqd->task_pid = current->pid; in io_sq_offload_create()
8951 sqd->task_tgid = current->tgid; in io_sq_offload_create()
8952 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); in io_sq_offload_create()
8958 sqd->thread = tsk; in io_sq_offload_create()
9693 struct io_sq_data *sqd = ctx->sq_data; in io_ring_exit_work() local
9696 io_sq_thread_park(sqd); in io_ring_exit_work()
9697 tsk = sqd->thread; in io_ring_exit_work()
9701 io_sq_thread_unpark(sqd); in io_ring_exit_work()
10039 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) in io_uring_cancel_generic() argument
10046 WARN_ON_ONCE(sqd && sqd->thread != current); in io_uring_cancel_generic()
10061 if (!sqd) { in io_uring_cancel_generic()
10073 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) in io_uring_cancel_generic()
10965 struct io_sq_data *sqd = NULL; in io_register_iowq_max_workers() local
10976 sqd = ctx->sq_data; in io_register_iowq_max_workers()
10977 if (sqd) { in io_register_iowq_max_workers()
10983 refcount_inc(&sqd->refs); in io_register_iowq_max_workers()
10985 mutex_lock(&sqd->lock); in io_register_iowq_max_workers()
10987 if (sqd->thread) in io_register_iowq_max_workers()
10988 tctx = sqd->thread->io_uring; in io_register_iowq_max_workers()
11010 if (sqd) { in io_register_iowq_max_workers()
11011 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
11012 io_put_sq_data(sqd); in io_register_iowq_max_workers()
11019 if (sqd) in io_register_iowq_max_workers()
11036 if (sqd) { in io_register_iowq_max_workers()
11037 mutex_unlock(&sqd->lock); in io_register_iowq_max_workers()
11038 io_put_sq_data(sqd); in io_register_iowq_max_workers()
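The final group is io_register_iowq_max_workers(): when the ring runs in SQPOLL mode, the worker limits are applied to the SQPOLL thread's io-wq (tctx = sqd->thread->io_uring) rather than to the registering task's, which is why the function takes an extra reference on sqd and holds sqd->lock across the update. From userspace this is the IORING_REGISTER_IOWQ_MAX_WORKERS registration; a minimal sketch using the liburing helper (assuming liburing 2.1 or newer):

/*
 * Cap the ring's io-wq worker pools via IORING_REGISTER_IOWQ_MAX_WORKERS.
 * For an SQPOLL ring the kernel routes this to the iou-sqp thread's
 * io-wq, which is what the sqd handling above implements.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	/* values[0]: bounded workers, values[1]: unbounded workers;
	 * a zero entry leaves that limit unchanged, and on return the
	 * array holds the previous limits. */
	unsigned int values[2] = { 4, 8 };
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	ret = io_uring_register_iowq_max_workers(&ring, values);
	if (ret < 0)
		fprintf(stderr, "register failed: %d\n", ret);
	else
		printf("previous limits: bound=%u unbound=%u\n",
		       values[0], values[1]);

	io_uring_queue_exit(&ring);
	return ret < 0;
}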