Lines matching refs:worker in kernel/workqueue.c
182 struct worker *manager; /* L: purely informational */
274 struct worker *rescuer; /* MD: rescue worker */
434 #define for_each_pool_worker(worker, pool) \ argument
435 list_for_each_entry((worker), &(pool)->workers, node) \
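
The macro above visits every worker attached to a pool. A minimal usage sketch, mirroring the unbind_workers() match further down (callers hold the pool attachment mutex, per the lockdep assertion in the continuation line the matcher elided):

    struct worker *worker;

    for_each_pool_worker(worker, pool)
            worker->flags |= WORKER_UNBOUND;
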
844 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker()
849 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
863 struct worker *worker = first_idle_worker(pool); in wake_up_worker() local
865 if (likely(worker)) in wake_up_worker()
866 wake_up_process(worker->task); in wake_up_worker()
877 struct worker *worker = kthread_data(task); in wq_worker_running() local
879 if (!worker->sleeping) in wq_worker_running()
889 if (!(worker->flags & WORKER_NOT_RUNNING)) in wq_worker_running()
890 worker->pool->nr_running++; in wq_worker_running()
892 worker->sleeping = 0; in wq_worker_running()
904 struct worker *worker = kthread_data(task); in wq_worker_sleeping() local
912 if (worker->flags & WORKER_NOT_RUNNING) in wq_worker_sleeping()
915 pool = worker->pool; in wq_worker_sleeping()
918 if (worker->sleeping) in wq_worker_sleeping()
921 worker->sleeping = 1; in wq_worker_sleeping()
929 if (worker->flags & WORKER_NOT_RUNNING) { in wq_worker_sleeping()
966 struct worker *worker = kthread_data(task); in wq_worker_last_func() local
968 return worker->last_func; in wq_worker_last_func()
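
All three scheduler hooks above (wq_worker_running(), wq_worker_sleeping(), wq_worker_last_func()) recover the worker with kthread_data(), which simply returns the data cookie handed to the kthread at creation time. A minimal sketch of that pairing, the creation side taken from the create_worker() match below ("kworker/sketch" is a made-up name; the real format is "kworker/%d:%d"):

    /* creation: 'worker' becomes the kthread's data cookie */
    worker->task = kthread_create_on_node(worker_thread, worker,
                                          pool->node, "kworker/sketch");

    /* hooks: any context that has the task can map back to the worker */
    struct worker *w = kthread_data(task);  /* the same 'worker' */
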
981 static inline void worker_set_flags(struct worker *worker, unsigned int flags) in worker_set_flags() argument
983 struct worker_pool *pool = worker->pool; in worker_set_flags()
985 WARN_ON_ONCE(worker->task != current); in worker_set_flags()
989 !(worker->flags & WORKER_NOT_RUNNING)) { in worker_set_flags()
993 worker->flags |= flags; in worker_set_flags()
1006 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) in worker_clr_flags() argument
1008 struct worker_pool *pool = worker->pool; in worker_clr_flags()
1009 unsigned int oflags = worker->flags; in worker_clr_flags()
1011 WARN_ON_ONCE(worker->task != current); in worker_clr_flags()
1013 worker->flags &= ~flags; in worker_clr_flags()
1021 if (!(worker->flags & WORKER_NOT_RUNNING)) in worker_clr_flags()
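
worker_set_flags()/worker_clr_flags() keep pool->nr_running consistent across WORKER_NOT_RUNNING, which is a mask of several flags rather than a single bit: only setting the first such flag decrements the counter, and only clearing the last one increments it. A hedged reconstruction of the clear-side transition around the oflags snapshot visible above (check against the full source):

    /* oflags was sampled before the clear; bump nr_running only when
     * the last NOT_RUNNING bit just went away */
    if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
            if (!(worker->flags & WORKER_NOT_RUNNING))
                    pool->nr_running++;
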
1058 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work()
1061 struct worker *worker; in find_worker_executing_work() local
1063 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1065 if (worker->current_work == work && in find_worker_executing_work()
1066 worker->current_func == work->func) in find_worker_executing_work()
1067 return worker; in find_worker_executing_work()
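
find_worker_executing_work() answers whether a work item is already executing on this pool, and on which worker. It pairs with the hash_add() in the process_one_work() matches below; sketched together, the double test exists because a work address may be freed and recycled while its old incarnation is still running:

    /* process_one_work(): key the busy worker by the work pointer */
    hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);

    /* lookup: a pointer match alone is not enough, hence ->current_func */
    hash_for_each_possible(pool->busy_hash, worker, hentry,
                           (unsigned long)work)
            if (worker->current_work == work &&
                worker->current_func == work->func)
                    return worker;
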
1385 struct worker *worker; in is_chained_work() local
1387 worker = current_wq_worker(); in is_chained_work()
1392 return worker && worker->current_pwq->wq == wq; in is_chained_work()
1470 struct worker *worker; in __queue_work() local
1474 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1476 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
1477 pwq = worker->current_pwq; in __queue_work()
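
The __queue_work() matches above enforce non-reentrancy: if the item is still running on the pool it last ran on, for the same workqueue, it is queued back onto that pool so two workers never execute the same item concurrently. Annotated restatement:

    worker = find_worker_executing_work(last_pool, work);
    if (worker && worker->current_pwq->wq == wq)
            pwq = worker->current_pwq;      /* stay on the executing pool */
    /* else: fall through to the pwq selected by CPU/node */
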
1802 static void worker_enter_idle(struct worker *worker) in worker_enter_idle() argument
1804 struct worker_pool *pool = worker->pool; in worker_enter_idle()
1806 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || in worker_enter_idle()
1807 WARN_ON_ONCE(!list_empty(&worker->entry) && in worker_enter_idle()
1808 (worker->hentry.next || worker->hentry.pprev))) in worker_enter_idle()
1812 worker->flags |= WORKER_IDLE; in worker_enter_idle()
1814 worker->last_active = jiffies; in worker_enter_idle()
1817 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1835 static void worker_leave_idle(struct worker *worker) in worker_leave_idle() argument
1837 struct worker_pool *pool = worker->pool; in worker_leave_idle()
1839 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) in worker_leave_idle()
1841 worker_clr_flags(worker, WORKER_IDLE); in worker_leave_idle()
1843 list_del_init(&worker->entry); in worker_leave_idle()
1846 static struct worker *alloc_worker(int node) in alloc_worker()
1848 struct worker *worker; in alloc_worker() local
1850 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
1851 if (worker) { in alloc_worker()
1852 INIT_LIST_HEAD(&worker->entry); in alloc_worker()
1853 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
1854 INIT_LIST_HEAD(&worker->node); in alloc_worker()
1856 worker->flags = WORKER_PREP; in alloc_worker()
1858 return worker; in alloc_worker()
1870 static void worker_attach_to_pool(struct worker *worker, in worker_attach_to_pool() argument
1881 worker->flags |= WORKER_UNBOUND; in worker_attach_to_pool()
1883 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
1885 if (worker->rescue_wq) in worker_attach_to_pool()
1886 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1888 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1889 worker->pool = pool; in worker_attach_to_pool()
1902 static void worker_detach_from_pool(struct worker *worker) in worker_detach_from_pool() argument
1904 struct worker_pool *pool = worker->pool; in worker_detach_from_pool()
1909 kthread_set_per_cpu(worker->task, -1); in worker_detach_from_pool()
1910 list_del(&worker->node); in worker_detach_from_pool()
1911 worker->pool = NULL; in worker_detach_from_pool()
1918 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); in worker_detach_from_pool()
1936 static struct worker *create_worker(struct worker_pool *pool) in create_worker()
1938 struct worker *worker; in create_worker() local
1947 worker = alloc_worker(pool->node); in create_worker()
1948 if (!worker) in create_worker()
1951 worker->id = id; in create_worker()
1959 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1961 if (IS_ERR(worker->task)) in create_worker()
1964 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1965 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1968 worker_attach_to_pool(worker, pool); in create_worker()
1972 worker->pool->nr_workers++; in create_worker()
1973 worker_enter_idle(worker); in create_worker()
1974 wake_up_process(worker->task); in create_worker()
1977 return worker; in create_worker()
1981 kfree(worker); in create_worker()
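
create_worker() is internal; from the API side, workers materialize on demand when work is queued. A minimal self-contained sketch of the public-facing pattern (my_work_fn/my_init are made-up names):

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            pr_info("ran on a pool worker created on demand\n");
    }
    static DECLARE_WORK(my_work, my_work_fn);

    static int __init my_init(void)
    {
            schedule_work(&my_work);  /* may end up in create_worker() */
            return 0;
    }
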
1995 static void destroy_worker(struct worker *worker) in destroy_worker() argument
1997 struct worker_pool *pool = worker->pool; in destroy_worker()
2002 if (WARN_ON(worker->current_work) || in destroy_worker()
2003 WARN_ON(!list_empty(&worker->scheduled)) || in destroy_worker()
2004 WARN_ON(!(worker->flags & WORKER_IDLE))) in destroy_worker()
2010 list_del_init(&worker->entry); in destroy_worker()
2011 worker->flags |= WORKER_DIE; in destroy_worker()
2012 wake_up_process(worker->task); in destroy_worker()
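
destroy_worker() frees nothing itself; it flags the victim and wakes it, and the worker self-destructs in the DIE branch of worker_thread() (matches further down). The two sides of the handshake, assembled from the lines above and below:

    /* destroy_worker(), pool->lock held */
    worker->flags |= WORKER_DIE;
    wake_up_process(worker->task);

    /* worker_thread(), on the next wakeup */
    if (unlikely(worker->flags & WORKER_DIE)) {
            set_task_comm(worker->task, "kworker/dying");
            ida_free(&pool->worker_ida, worker->id);
            worker_detach_from_pool(worker);
            kfree(worker);
            return 0;
    }
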
2022 struct worker *worker; in idle_worker_timeout() local
2026 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
2027 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_worker_timeout()
2034 destroy_worker(worker); in idle_worker_timeout()
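
idle_worker_timeout() trims workers that have sat idle past IDLE_WORKER_TIMEOUT; the idle list is kept in LIFO order, so the longest-idle worker sits at its tail. A hedged reconstruction of the expiry loop around the matches above (too_many_workers() is the pool-sizing predicate in the same file):

    while (too_many_workers(pool)) {
            worker = list_entry(pool->idle_list.prev, struct worker, entry);
            expires = worker->last_active + IDLE_WORKER_TIMEOUT;

            if (time_before(jiffies, expires)) {
                    mod_timer(&pool->idle_timer, expires);  /* re-arm */
                    break;
            }
            destroy_worker(worker);
    }
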
2159 static bool manage_workers(struct worker *worker) in manage_workers() argument
2161 struct worker_pool *pool = worker->pool; in manage_workers()
2167 pool->manager = worker; in manage_workers()
2191 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2196 struct worker_pool *pool = worker->pool; in process_one_work()
2199 struct worker *collision; in process_one_work()
2230 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2231 worker->current_work = work; in process_one_work()
2232 worker->current_func = work->func; in process_one_work()
2233 worker->current_pwq = pwq; in process_one_work()
2235 worker->current_color = get_work_color(work_data); in process_one_work()
2241 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2252 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2299 worker->current_func(work); in process_one_work()
2304 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
2312 worker->current_func); in process_one_work()
2331 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2334 worker->last_func = worker->current_func; in process_one_work()
2337 hash_del(&worker->hentry); in process_one_work()
2338 worker->current_work = NULL; in process_one_work()
2339 worker->current_func = NULL; in process_one_work()
2340 worker->current_pwq = NULL; in process_one_work()
2341 worker->current_color = INT_MAX; in process_one_work()
2357 static void process_scheduled_works(struct worker *worker) in process_scheduled_works() argument
2359 while (!list_empty(&worker->scheduled)) { in process_scheduled_works()
2360 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works()
2362 process_one_work(worker, work); in process_scheduled_works()
2390 struct worker *worker = __worker; in worker_thread() local
2391 struct worker_pool *pool = worker->pool; in worker_thread()
2399 if (unlikely(worker->flags & WORKER_DIE)) { in worker_thread()
2401 WARN_ON_ONCE(!list_empty(&worker->entry)); in worker_thread()
2404 set_task_comm(worker->task, "kworker/dying"); in worker_thread()
2405 ida_free(&pool->worker_ida, worker->id); in worker_thread()
2406 worker_detach_from_pool(worker); in worker_thread()
2407 kfree(worker); in worker_thread()
2411 worker_leave_idle(worker); in worker_thread()
2418 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2426 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in worker_thread()
2435 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in worker_thread()
2446 process_one_work(worker, work); in worker_thread()
2447 if (unlikely(!list_empty(&worker->scheduled))) in worker_thread()
2448 process_scheduled_works(worker); in worker_thread()
2450 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2451 process_scheduled_works(worker); in worker_thread()
2455 worker_set_flags(worker, WORKER_PREP); in worker_thread()
2464 worker_enter_idle(worker); in worker_thread()
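
The execution branch of worker_thread() distinguishes singular from linked work items; a hedged reconstruction around the matched lines, the branch condition being the WORK_STRUCT_LINKED bit in the item's data word:

    if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
            /* plain item: run it, then drain anything it scheduled */
            process_one_work(worker, work);
            if (unlikely(!list_empty(&worker->scheduled)))
                    process_scheduled_works(worker);
    } else {
            /* head of a linked chain: splice the chain, run it in order */
            move_linked_works(work, &worker->scheduled, NULL);
            process_scheduled_works(worker);
    }
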
2494 struct worker *rescuer = __rescuer; in rescuer_thread()
2628 struct worker *worker; in check_flush_dependency() local
2633 worker = current_wq_worker(); in check_flush_dependency()
2638 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
2641 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
2683 struct work_struct *target, struct worker *worker) in insert_wq_barrier() argument
2709 if (worker) { in insert_wq_barrier()
2710 head = worker->scheduled.next; in insert_wq_barrier()
2711 work_color = worker->current_color; in insert_wq_barrier()
3016 struct worker *worker = NULL; in start_flush_work() local
3036 worker = find_worker_executing_work(pool, work); in start_flush_work()
3037 if (!worker) in start_flush_work()
3039 pwq = worker->current_pwq; in start_flush_work()
3044 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
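
start_flush_work() and insert_wq_barrier() are the machinery behind the public flush_work(): the barrier work is spliced right after the executing worker's current position so the flusher wakes as soon as the target (plus anything linked to it) completes. The user-facing counterpart, continuing the my_work sketch above:

    static void my_teardown(void)
    {
            /* sleeps until the last queued instance finishes; returns
             * true if it actually had to wait */
            flush_work(&my_work);
    }
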
3579 struct worker *worker; in put_unbound_pool() local
3607 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3608 destroy_worker(worker); in put_unbound_pool()
4273 struct worker *rescuer; in init_rescuer()
4434 struct worker *rescuer = wq->rescuer; in destroy_workqueue()
4547 struct worker *worker = current_wq_worker(); in current_work() local
4549 return worker ? worker->current_work : NULL; in current_work()
4563 struct worker *worker = current_wq_worker(); in current_is_workqueue_rescuer() local
4565 return worker && worker->rescue_wq; in current_is_workqueue_rescuer()
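
current_is_workqueue_rescuer() is exported so a work function can tell whether it is running on the emergency rescuer (i.e. under memory pressure) rather than a regular pool worker. Usage sketch inside a hypothetical work function:

    static void my_rescue_aware_fn(struct work_struct *work)
    {
            if (current_is_workqueue_rescuer())
                    pr_warn("running from the rescuer, keep it lean\n");
    }
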
4656 struct worker *worker = current_wq_worker(); in set_worker_desc() local
4659 if (worker) { in set_worker_desc()
4661 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); in set_worker_desc()
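
set_worker_desc() lets a running work function attach a human-readable tag to its worker; wq_worker_comm() (matches below) then folds the tag into the kworker name shown by ps, prefixed with '+' while a work item is in flight. Usage sketch, values hypothetical:

    static void my_flush_fn(struct work_struct *work)
    {
            /* "8" and "0" stand in for a real device's major:minor */
            set_worker_desc("flush-%d:%d", 8, 0);
            /* ps now shows e.g. "kworker/u8:3+flush-8:0" */
    }
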
4687 struct worker *worker; in print_worker_info() local
4696 worker = kthread_probe_data(task); in print_worker_info()
4702 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); in print_worker_info()
4703 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4706 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); in print_worker_info()
4742 struct worker *worker; in show_pwq() local
4753 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4754 if (worker->current_pwq == pwq) { in show_pwq()
4763 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4764 if (worker->current_pwq != pwq) in show_pwq()
4768 task_pid_nr(worker->task), in show_pwq()
4769 worker->rescue_wq ? "(RESCUER)" : "", in show_pwq()
4770 worker->current_func); in show_pwq()
4771 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4860 struct worker *worker; in show_one_worker_pool() local
4885 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
4887 task_pid_nr(worker->task)); in show_one_worker_pool()
4942 struct worker *worker = kthread_data(task); in wq_worker_comm() local
4943 struct worker_pool *pool = worker->pool; in wq_worker_comm()
4952 if (worker->desc[0] != '\0') { in wq_worker_comm()
4953 if (worker->current_work) in wq_worker_comm()
4955 worker->desc); in wq_worker_comm()
4958 worker->desc); in wq_worker_comm()
4988 struct worker *worker; in unbind_workers() local
5002 for_each_pool_worker(worker, pool) in unbind_workers()
5003 worker->flags |= WORKER_UNBOUND; in unbind_workers()
5026 for_each_pool_worker(worker, pool) { in unbind_workers()
5027 kthread_set_per_cpu(worker->task, -1); in unbind_workers()
5029 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); in unbind_workers()
5031 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); in unbind_workers()
5046 struct worker *worker; in rebind_workers() local
5057 for_each_pool_worker(worker, pool) { in rebind_workers()
5058 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
5059 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in rebind_workers()
5067 for_each_pool_worker(worker, pool) { in rebind_workers()
5068 unsigned int worker_flags = worker->flags; in rebind_workers()
5088 WRITE_ONCE(worker->flags, worker_flags); in rebind_workers()
5107 struct worker *worker; in restore_unbound_workers_cpumask() local
5118 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
5119 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); in restore_unbound_workers_cpumask()