Lines matching refs:worker (Linux io-wq.c, the io_uring worker pool)
139 static void io_wqe_dec_running(struct io_worker *worker);
146 static bool io_worker_get(struct io_worker *worker) in io_worker_get() argument
148 return refcount_inc_not_zero(&worker->ref); in io_worker_get()
151 static void io_worker_release(struct io_worker *worker) in io_worker_release() argument
153 if (refcount_dec_and_test(&worker->ref)) in io_worker_release()
154 complete(&worker->ref_done); in io_worker_release()
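The two helpers above are the lifetime primitive everything else in this listing leans on: a lookup only succeeds while the refcount is nonzero, and the final put fires a completion that the exit path waits on. A minimal sketch of the pattern, assuming only the ref/ref_done fields named in the listing (the struct is a stand-in, not the real io_worker):

    /* Sketch: try-get / put-with-completion, as in io_worker_get()/io_worker_release(). */
    #include <linux/refcount.h>
    #include <linux/completion.h>

    struct worker_ref {                     /* stand-in for the relevant io_worker fields */
            refcount_t ref;
            struct completion ref_done;     /* signalled by the final put */
    };

    static bool worker_get(struct worker_ref *w)
    {
            /* Fails once the count has hit zero: a dying worker cannot be revived. */
            return refcount_inc_not_zero(&w->ref);
    }

    static void worker_put(struct worker_ref *w)
    {
            if (refcount_dec_and_test(&w->ref))
                    complete(&w->ref_done); /* unblocks wait_for_completion() in the exit path */
    }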
168 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) in io_wqe_get_acct() argument
170 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); in io_wqe_get_acct()
179 static void io_worker_cancel_cb(struct io_worker *worker) in io_worker_cancel_cb() argument
181 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_cancel_cb()
182 struct io_wqe *wqe = worker->wqe; in io_worker_cancel_cb()
186 raw_spin_lock(&worker->wqe->lock); in io_worker_cancel_cb()
188 raw_spin_unlock(&worker->wqe->lock); in io_worker_cancel_cb()
190 clear_bit_unlock(0, &worker->create_state); in io_worker_cancel_cb()
191 io_worker_release(worker); in io_worker_cancel_cb()
196 struct io_worker *worker; in io_task_worker_match() local
200 worker = container_of(cb, struct io_worker, create_work); in io_task_worker_match()
201 return worker == data; in io_task_worker_match()
204 static void io_worker_exit(struct io_worker *worker) in io_worker_exit() argument
206 struct io_wqe *wqe = worker->wqe; in io_worker_exit()
211 io_task_worker_match, worker); in io_worker_exit()
215 io_worker_cancel_cb(worker); in io_worker_exit()
218 if (refcount_dec_and_test(&worker->ref)) in io_worker_exit()
219 complete(&worker->ref_done); in io_worker_exit()
220 wait_for_completion(&worker->ref_done); in io_worker_exit()
223 if (worker->flags & IO_WORKER_F_FREE) in io_worker_exit()
224 hlist_nulls_del_rcu(&worker->nulls_node); in io_worker_exit()
225 list_del_rcu(&worker->all_list); in io_worker_exit()
227 io_wqe_dec_running(worker); in io_worker_exit()
228 worker->flags = 0; in io_worker_exit()
233 kfree_rcu(worker, rcu); in io_worker_exit()
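io_worker_exit() is the consumer of that completion, and the ordering matters: cancel any queued create_work first, drop the worker's own reference, wait until every concurrent io_worker_get() holder has called io_worker_release(), and only then unlink and free. A condensed sketch of that ordering, with the list and lock handling reduced to comments (the real function also fixes up accounting):

    /* Sketch of the teardown ordering in io_worker_exit(). */
    if (refcount_dec_and_test(&worker->ref))
            complete(&worker->ref_done);            /* we might be the last holder */
    wait_for_completion(&worker->ref_done);         /* now nobody else holds a ref */

    /* Unlink under wqe->lock: off free_list if IO_WORKER_F_FREE, off all_list. */
    /* RCU walkers may still see the nodes, so the memory is freed only after
     * a grace period: */
    kfree_rcu(worker, rcu);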
255 struct io_worker *worker; in io_wqe_activate_free_worker() local
262 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { in io_wqe_activate_free_worker()
263 if (!io_worker_get(worker)) in io_wqe_activate_free_worker()
265 if (io_wqe_get_acct(worker) != acct) { in io_wqe_activate_free_worker()
266 io_worker_release(worker); in io_wqe_activate_free_worker()
269 if (wake_up_process(worker->task)) { in io_wqe_activate_free_worker()
270 io_worker_release(worker); in io_wqe_activate_free_worker()
273 io_worker_release(worker); in io_wqe_activate_free_worker()
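io_wqe_activate_free_worker() shows the read side of those lists: an RCU walk over the nulls free list, a try-get on each candidate, an accounting-class check (bound vs. unbound), and a wake_up_process() that only counts if it actually made the task runnable. A plausible reconstruction of the loop from the matched lines, assuming wqe and acct are in scope as in the enclosing function:

    /* Sketch: wake one idle worker of the right class, as in io_wqe_activate_free_worker(). */
    struct hlist_nulls_node *n;
    struct io_worker *worker;

    rcu_read_lock();
    hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
            if (!io_worker_get(worker))             /* exiting; skip */
                    continue;
            if (io_wqe_get_acct(worker) != acct) {  /* bound worker, unbound work (or vice versa) */
                    io_worker_release(worker);
                    continue;
            }
            if (wake_up_process(worker->task)) {    /* really woke it: done */
                    io_worker_release(worker);
                    break;
            }
            io_worker_release(worker);
    }
    rcu_read_unlock();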
304 static void io_wqe_inc_running(struct io_worker *worker) in io_wqe_inc_running() argument
306 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_inc_running()
313 struct io_worker *worker; in create_worker_cb() local
319 worker = container_of(cb, struct io_worker, create_work); in create_worker_cb()
320 wqe = worker->wqe; in create_worker_cb()
322 acct = &wqe->acct[worker->create_index]; in create_worker_cb()
330 create_io_worker(wq, wqe, worker->create_index); in create_worker_cb()
335 clear_bit_unlock(0, &worker->create_state); in create_worker_cb()
336 io_worker_release(worker); in create_worker_cb()
339 static bool io_queue_worker_create(struct io_worker *worker, in io_queue_worker_create() argument
343 struct io_wqe *wqe = worker->wqe; in io_queue_worker_create()
349 if (!io_worker_get(worker)) in io_queue_worker_create()
357 if (test_bit(0, &worker->create_state) || in io_queue_worker_create()
358 test_and_set_bit_lock(0, &worker->create_state)) in io_queue_worker_create()
362 init_task_work(&worker->create_work, func); in io_queue_worker_create()
363 worker->create_index = acct->index; in io_queue_worker_create()
364 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { in io_queue_worker_create()
377 clear_bit_unlock(0, &worker->create_state); in io_queue_worker_create()
379 io_worker_release(worker); in io_queue_worker_create()
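io_queue_worker_create() serializes creation requests with an open-coded bit lock: bit 0 of worker->create_state is taken with test_and_set_bit_lock() and released with clear_bit_unlock(), and the request itself travels as task_work delivered to the io_uring owner task with TWA_SIGNAL so it runs promptly. A sketch of the core, assuming the unwind order the listing shows (the real function also adjusts acct->nr_running and the wq reference):

    /* Sketch: at most one pending create per worker, handed off as task_work. */
    if (!io_worker_get(worker))
            goto fail;
    if (test_bit(0, &worker->create_state) ||               /* cheap pre-check */
        test_and_set_bit_lock(0, &worker->create_state))    /* the actual bit lock */
            goto fail_release;

    init_task_work(&worker->create_work, func);             /* create_worker_cb or create_worker_cont */
    worker->create_index = acct->index;
    if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
            return true;    /* func() clears the bit and drops the ref when it runs */

    clear_bit_unlock(0, &worker->create_state);
    fail_release:
            io_worker_release(worker);
    fail:
            return false;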
386 static void io_wqe_dec_running(struct io_worker *worker) in io_wqe_dec_running() argument
389 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_dec_running()
390 struct io_wqe *wqe = worker->wqe; in io_wqe_dec_running()
392 if (!(worker->flags & IO_WORKER_F_UP)) in io_wqe_dec_running()
399 io_queue_worker_create(worker, acct, create_worker_cb); in io_wqe_dec_running()
408 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, in __io_worker_busy() argument
412 if (worker->flags & IO_WORKER_F_FREE) { in __io_worker_busy()
413 worker->flags &= ~IO_WORKER_F_FREE; in __io_worker_busy()
414 hlist_nulls_del_init_rcu(&worker->nulls_node); in __io_worker_busy()
425 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) in __io_worker_idle() argument
428 if (!(worker->flags & IO_WORKER_F_FREE)) { in __io_worker_idle()
429 worker->flags |= IO_WORKER_F_FREE; in __io_worker_idle()
430 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in __io_worker_idle()
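__io_worker_busy() and __io_worker_idle() maintain a single invariant: IO_WORKER_F_FREE is set exactly when the worker sits on wqe->free_list. Both run under wqe->lock, and the nulls-RCU list ops keep the lockless readers (like the activation loop above) safe. Reconstructed from the matched lines:

    /* Sketch: flag and free_list kept in lockstep, as in __io_worker_busy()/__io_worker_idle(). */
    if (worker->flags & IO_WORKER_F_FREE) {         /* picking up work: leave the free list */
            worker->flags &= ~IO_WORKER_F_FREE;
            hlist_nulls_del_init_rcu(&worker->nulls_node);
    }

    if (!(worker->flags & IO_WORKER_F_FREE)) {      /* going idle: rejoin the free list */
            worker->flags |= IO_WORKER_F_FREE;
            hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
    }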
458 struct io_worker *worker) in io_get_next_work() argument
464 struct io_wqe *wqe = worker->wqe; in io_get_next_work()
524 static void io_assign_current_work(struct io_worker *worker, in io_assign_current_work() argument
532 spin_lock(&worker->lock); in io_assign_current_work()
533 worker->cur_work = work; in io_assign_current_work()
534 spin_unlock(&worker->lock); in io_assign_current_work()
539 static void io_worker_handle_work(struct io_worker *worker) in io_worker_handle_work() argument
542 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_worker_handle_work()
543 struct io_wqe *wqe = worker->wqe; in io_worker_handle_work()
557 work = io_get_next_work(acct, worker); in io_worker_handle_work()
559 __io_worker_busy(wqe, worker, work); in io_worker_handle_work()
564 io_assign_current_work(worker, work); in io_worker_handle_work()
577 io_assign_current_work(worker, NULL); in io_worker_handle_work()
585 io_assign_current_work(worker, work); in io_worker_handle_work()
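io_worker_handle_work() is the dispatch loop: pull the next item, mark the worker busy, publish it as cur_work so cancellation can find it, execute, then chase any linked continuation before looking for more. A skeleton of that loop; run_one() is a hypothetical stand-in for the wq->do_work()/wq->free_work() sequence and the hashed-work handling the real loop performs:

    /* Sketch of the loop structure in io_worker_handle_work(). */
    for (;;) {
            struct io_wq_work *work = io_get_next_work(acct, worker);

            if (!work)
                    break;
            __io_worker_busy(wqe, worker, work);    /* drop off the free list */
            io_assign_current_work(worker, work);   /* visible to io_wq_worker_cancel() */

            do {
                    struct io_wq_work *next = run_one(work);        /* hypothetical helper */
                    io_assign_current_work(worker, next);           /* NULL once the chain ends */
                    work = next;
            } while (work);
    }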
611 struct io_worker *worker = data; in io_wqe_worker() local
612 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_wqe_worker()
613 struct io_wqe *wqe = worker->wqe; in io_wqe_worker()
618 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); in io_wqe_worker()
630 io_worker_handle_work(worker); in io_wqe_worker()
641 __io_worker_idle(wqe, worker); in io_wqe_worker()
658 io_worker_handle_work(worker); in io_wqe_worker()
661 io_worker_exit(worker); in io_wqe_worker()
670 struct io_worker *worker = tsk->pf_io_worker; in io_wq_worker_running() local
672 if (!worker) in io_wq_worker_running()
674 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_running()
676 if (worker->flags & IO_WORKER_F_RUNNING) in io_wq_worker_running()
678 worker->flags |= IO_WORKER_F_RUNNING; in io_wq_worker_running()
679 io_wqe_inc_running(worker); in io_wq_worker_running()
688 struct io_worker *worker = tsk->pf_io_worker; in io_wq_worker_sleeping() local
690 if (!worker) in io_wq_worker_sleeping()
692 if (!(worker->flags & IO_WORKER_F_UP)) in io_wq_worker_sleeping()
694 if (!(worker->flags & IO_WORKER_F_RUNNING)) in io_wq_worker_sleeping()
697 worker->flags &= ~IO_WORKER_F_RUNNING; in io_wq_worker_sleeping()
699 raw_spin_lock(&worker->wqe->lock); in io_wq_worker_sleeping()
700 io_wqe_dec_running(worker); in io_wq_worker_sleeping()
701 raw_spin_unlock(&worker->wqe->lock); in io_wq_worker_sleeping()
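io_wq_worker_running() and io_wq_worker_sleeping() are the scheduler hooks; they find the worker through tsk->pf_io_worker and keep the per-acct running count accurate, which is what lets io_wqe_dec_running() notice "the last runnable worker just blocked with work pending" and queue a replacement. The sleeping side, reconstructed from the matched lines under a sketch name:

    /* Sketch: sched-out hook, as in io_wq_worker_sleeping(). */
    static void worker_sleeping_hook(struct task_struct *tsk)
    {
            struct io_worker *worker = tsk->pf_io_worker;

            if (!worker)                                    /* not an io-wq thread */
                    return;
            if (!(worker->flags & IO_WORKER_F_UP))          /* not fully started yet */
                    return;
            if (!(worker->flags & IO_WORKER_F_RUNNING))     /* already counted as blocked */
                    return;

            worker->flags &= ~IO_WORKER_F_RUNNING;
            raw_spin_lock(&worker->wqe->lock);
            io_wqe_dec_running(worker);     /* may kick off creation of a new worker */
            raw_spin_unlock(&worker->wqe->lock);
    }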
704 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, in io_init_new_worker() argument
707 tsk->pf_io_worker = worker; in io_init_new_worker()
708 worker->task = tsk; in io_init_new_worker()
713 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); in io_init_new_worker()
714 list_add_tail_rcu(&worker->all_list, &wqe->all_list); in io_init_new_worker()
715 worker->flags |= IO_WORKER_F_FREE; in io_init_new_worker()
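io_init_new_worker() publishes a freshly created task: wire up the task-to-worker back-pointer first (so the sched hooks above work from the first context switch), then add the worker to both lists and mark it free before the task runs. Lines without `worker` in them (affinity, locking, the final wakeup) don't appear in this listing; the sketch fills them in from the surrounding pattern and they should be read as assumptions:

    /* Sketch: publishing a new worker, as in io_init_new_worker(). */
    tsk->pf_io_worker = worker;     /* lets the sched hooks find the worker */
    worker->task = tsk;
    set_cpus_allowed_ptr(tsk, wqe->cpu_mask);       /* assumed: pin to the wqe's mask */

    raw_spin_lock(&wqe->lock);                      /* assumed: list updates are locked */
    hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
    list_add_tail_rcu(&worker->all_list, &wqe->all_list);
    worker->flags |= IO_WORKER_F_FREE;
    raw_spin_unlock(&wqe->lock);
    wake_up_new_task(tsk);                          /* assumed: start the thread */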
747 struct io_worker *worker; in create_worker_cont() local
751 worker = container_of(cb, struct io_worker, create_work); in create_worker_cont()
752 clear_bit_unlock(0, &worker->create_state); in create_worker_cont()
753 wqe = worker->wqe; in create_worker_cont()
754 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_worker_cont()
756 io_init_new_worker(wqe, worker, tsk); in create_worker_cont()
757 io_worker_release(worker); in create_worker_cont()
760 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in create_worker_cont()
776 kfree(worker); in create_worker_cont()
781 io_worker_release(worker); in create_worker_cont()
782 schedule_work(&worker->work); in create_worker_cont()
787 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create() local
788 struct io_wqe_acct *acct = io_wqe_get_acct(worker); in io_workqueue_create()
790 if (!io_queue_worker_create(worker, acct, create_worker_cont)) in io_workqueue_create()
791 kfree(worker); in io_workqueue_create()
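create_worker_cont() and io_workqueue_create() together form the retry path for transient create_io_thread() failures: the failure is bounced to a system workqueue, which re-arms a create request as task_work, which retries the thread creation. The workqueue half, reconstructed from the matched lines:

    /* Sketch: retry half, as in io_workqueue_create(). */
    static void workqueue_create(struct work_struct *work)
    {
            struct io_worker *worker = container_of(work, struct io_worker, work);
            struct io_wqe_acct *acct = io_wqe_get_acct(worker);

            /* Re-queue the create as task_work; if even that fails, give up and free. */
            if (!io_queue_worker_create(worker, acct, create_worker_cont))
                    kfree(worker);
    }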
797 struct io_worker *worker; in create_io_worker() local
802 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); in create_io_worker()
803 if (!worker) { in create_io_worker()
813 refcount_set(&worker->ref, 1); in create_io_worker()
814 worker->wqe = wqe; in create_io_worker()
815 spin_lock_init(&worker->lock); in create_io_worker()
816 init_completion(&worker->ref_done); in create_io_worker()
819 worker->flags |= IO_WORKER_F_BOUND; in create_io_worker()
821 tsk = create_io_thread(io_wqe_worker, worker, wqe->node); in create_io_worker()
823 io_init_new_worker(wqe, worker, tsk); in create_io_worker()
825 kfree(worker); in create_io_worker()
828 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
829 schedule_work(&worker->work); in create_io_worker()
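create_io_worker() is the first-attempt path: allocate on the wqe's NUMA node, initialize the refcount to 1 (the worker's own reference), try create_io_thread(), and on a transient failure defer to the workqueue retry above. Condensed from the matched lines; should_retry() is a hypothetical stand-in for the error-classification helper the real file uses, and the bound-flag condition is likewise assumed:

    /* Sketch: first-time worker creation, as in create_io_worker(). */
    worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
    if (!worker)
            return false;                   /* accounting fixups elided */

    refcount_set(&worker->ref, 1);          /* the worker's own reference */
    worker->wqe = wqe;
    spin_lock_init(&worker->lock);
    init_completion(&worker->ref_done);
    if (index == IO_WQ_ACCT_BOUND)          /* assumed condition for the bound flag */
            worker->flags |= IO_WORKER_F_BOUND;

    tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
    if (!IS_ERR(tsk)) {
            io_init_new_worker(wqe, worker, tsk);
    } else if (!should_retry(PTR_ERR(tsk))) {
            kfree(worker);                  /* hard failure: give up */
            return false;
    } else {
            INIT_WORK(&worker->work, io_workqueue_create);
            schedule_work(&worker->work);   /* transient: retry from the workqueue */
    }
    return true;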
843 struct io_worker *worker; in io_wq_for_each_worker() local
846 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { in io_wq_for_each_worker()
847 if (io_worker_get(worker)) { in io_wq_for_each_worker()
849 if (worker->task) in io_wq_for_each_worker()
850 ret = func(worker, data); in io_wq_for_each_worker()
851 io_worker_release(worker); in io_wq_for_each_worker()
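io_wq_for_each_worker() is the generic visitor used by the wake, cancel, and affinity callbacks below: an RCU walk of all_list, a try-get per worker, and a task check because a worker that hasn't finished io_init_new_worker() has no task yet. Reconstructed from the matched lines; callers are expected to hold rcu_read_lock() around the walk:

    /* Sketch: visiting live workers, as in io_wq_for_each_worker(). */
    bool ret = false;

    list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
            if (io_worker_get(worker)) {
                    /* no task yet if creation hasn't completed */
                    if (worker->task)
                            ret = func(worker, data);
                    io_worker_release(worker);
                    if (ret)
                            break;  /* assumed: a true return stops the walk */
            }
    }
    return ret;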
860 static bool io_wq_worker_wake(struct io_worker *worker, void *data) in io_wq_worker_wake() argument
862 set_notify_signal(worker->task); in io_wq_worker_wake()
863 wake_up_process(worker->task); in io_wq_worker_wake()
973 static bool io_wq_worker_cancel(struct io_worker *worker, void *data) in io_wq_worker_cancel() argument
981 spin_lock(&worker->lock); in io_wq_worker_cancel()
982 if (worker->cur_work && in io_wq_worker_cancel()
983 match->fn(worker->cur_work, match->data)) { in io_wq_worker_cancel()
984 set_notify_signal(worker->task); in io_wq_worker_cancel()
987 spin_unlock(&worker->lock); in io_wq_worker_cancel()
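io_wq_worker_cancel() is one such visitor: it matches the in-flight item under worker->lock, the same lock io_assign_current_work() takes when publishing cur_work, so the worker cannot retire a matched item mid-check. It doesn't wait for completion; it just nudges the worker with set_notify_signal(). A sketch, with the bookkeeping fields of the match descriptor (nr_running, cancel_all) assumed beyond the two matched lines:

    /* Sketch: cancelling in-flight work, as in io_wq_worker_cancel(). */
    static bool worker_cancel(struct io_worker *worker, void *data)
    {
            struct io_cb_cancel_data *match = data;

            spin_lock(&worker->lock);
            if (worker->cur_work && match->fn(worker->cur_work, match->data)) {
                    set_notify_signal(worker->task);        /* interrupt, don't wait */
                    match->nr_running++;                    /* assumed bookkeeping */
            }
            spin_unlock(&worker->lock);

            /* assumed: stop the walk after one hit unless cancelling everything */
            return match->nr_running && !match->cancel_all;
    }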
1198 struct io_worker *worker; in io_task_work_match() local
1202 worker = container_of(cb, struct io_worker, create_work); in io_task_work_match()
1203 return worker->wqe->wq == data; in io_task_work_match()
1216 struct io_worker *worker; in io_wq_cancel_tw_create() local
1218 worker = container_of(cb, struct io_worker, create_work); in io_wq_cancel_tw_create()
1219 io_worker_cancel_cb(worker); in io_wq_cancel_tw_create()
1225 kfree(worker); in io_wq_cancel_tw_create()
1290 static bool io_wq_worker_affinity(struct io_worker *worker, void *data) in io_wq_worker_affinity() argument
1295 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()
1297 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); in io_wq_worker_affinity()