Lines Matching refs:worker

167 struct worker *manager; /* L: purely informational */
250 struct worker *rescuer; /* I: rescue worker */
389 #define for_each_pool_worker(worker, pool) \ argument
390 list_for_each_entry((worker), &(pool)->workers, node) \
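
The for_each_pool_worker() macro at 389-390 is a thin wrapper around the kernel's intrusive-list iterator: each worker embeds a list_head (->node) threaded onto the pool's ->workers list, and iteration recovers the containing worker from each embedded node. Below is a minimal userspace sketch of that pattern; the my_worker type and the explicit type argument to the macro are simplifications (the real list_for_each_entry() infers the type with typeof).

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive doubly-linked list, after <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified list_for_each_entry(); the kernel version infers 'type'. */
#define list_for_each_entry(pos, head, type, member)			\
	for ((pos) = container_of((head)->next, type, member);		\
	     &(pos)->member != (head);					\
	     (pos) = container_of((pos)->member.next, type, member))

struct my_worker {			/* hypothetical stand-in for struct worker */
	int id;
	struct list_head node;		/* threaded onto the pool's ->workers list */
};

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->next = head;
	entry->prev = head->prev;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head workers = { &workers, &workers };
	struct my_worker a = { .id = 1 }, b = { .id = 2 };
	struct my_worker *w;

	list_add_tail(&a.node, &workers);
	list_add_tail(&b.node, &workers);

	/* The shape of for_each_pool_worker(worker, pool). */
	list_for_each_entry(w, &workers, struct my_worker, node)
		printf("worker %d\n", w->id);
	return 0;
}
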
824 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker()
829 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
843 struct worker *worker = first_idle_worker(pool); in wake_up_worker() local
845 if (likely(worker)) in wake_up_worker()
846 wake_up_process(worker->task); in wake_up_worker()
862 struct worker *worker = kthread_data(task); in wq_worker_waking_up() local
864 if (!(worker->flags & WORKER_NOT_RUNNING)) { in wq_worker_waking_up()
865 WARN_ON_ONCE(worker->pool->cpu != cpu); in wq_worker_waking_up()
866 atomic_inc(&worker->pool->nr_running); in wq_worker_waking_up()
887 struct worker *worker = kthread_data(task), *to_wakeup = NULL; in wq_worker_sleeping() local
895 if (worker->flags & WORKER_NOT_RUNNING) in wq_worker_sleeping()
898 pool = worker->pool; in wq_worker_sleeping()
931 static inline void worker_set_flags(struct worker *worker, unsigned int flags) in worker_set_flags() argument
933 struct worker_pool *pool = worker->pool; in worker_set_flags()
935 WARN_ON_ONCE(worker->task != current); in worker_set_flags()
939 !(worker->flags & WORKER_NOT_RUNNING)) { in worker_set_flags()
943 worker->flags |= flags; in worker_set_flags()
956 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) in worker_clr_flags() argument
958 struct worker_pool *pool = worker->pool; in worker_clr_flags()
959 unsigned int oflags = worker->flags; in worker_clr_flags()
961 WARN_ON_ONCE(worker->task != current); in worker_clr_flags()
963 worker->flags &= ~flags; in worker_clr_flags()
971 if (!(worker->flags & WORKER_NOT_RUNNING)) in worker_clr_flags()
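
worker_set_flags() and worker_clr_flags() (931-971) do more than flip bits: pool->nr_running counts workers that are actually runnable, so it must drop when the first WORKER_NOT_RUNNING bit goes on and rise when the last one goes off. A single-threaded sketch of that transition accounting, assuming an illustrative subset of the flag bits (the kernel uses an atomic counter and more NOT_RUNNING bits):

#include <assert.h>

/* Illustrative subset of the worker flag bits. */
enum {
	WORKER_PREP		= 1 << 0,
	WORKER_CPU_INTENSIVE	= 1 << 1,
	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE,
};

struct pool   { int nr_running; };
struct worker { unsigned int flags; struct pool *pool; };

static void set_flags(struct worker *w, unsigned int flags)
{
	/* 0 -> nonzero transition of the NOT_RUNNING subset */
	if ((flags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
		w->pool->nr_running--;
	w->flags |= flags;
}

static void clr_flags(struct worker *w, unsigned int flags)
{
	unsigned int oflags = w->flags;

	w->flags &= ~flags;
	/* nonzero -> 0 transition of the NOT_RUNNING subset */
	if ((oflags & WORKER_NOT_RUNNING) && !(w->flags & WORKER_NOT_RUNNING))
		w->pool->nr_running++;
}

int main(void)
{
	struct pool pool = { .nr_running = 1 };
	struct worker w = { .flags = 0, .pool = &pool };

	set_flags(&w, WORKER_CPU_INTENSIVE);	/* running -> not running */
	assert(pool.nr_running == 0);
	clr_flags(&w, WORKER_CPU_INTENSIVE);	/* not running -> running */
	assert(pool.nr_running == 1);
	return 0;
}
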
1008 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work()
1011 struct worker *worker; in find_worker_executing_work() local
1013 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1015 if (worker->current_work == work && in find_worker_executing_work()
1016 worker->current_func == work->func) in find_worker_executing_work()
1017 return worker; in find_worker_executing_work()
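
find_worker_executing_work() (1008-1017) is what makes a workqueue non-reentrant: the pool's busy hash is probed to see whether some worker is already executing the item. Note the double test at 1015-1016: a work struct may be freed and its address recycled for a different item, so the pointer match is confirmed against the work function as well. A reduced sketch of that predicate:

#include <assert.h>
#include <stdbool.h>

typedef void (*work_func_t)(void *);

struct work_struct { work_func_t func; };
struct worker {
	struct work_struct *current_work;	/* item being executed, if any */
	work_func_t	    current_func;	/* its function, sampled at dispatch */
};

/* Address AND function must match, so a recycled work address alone
 * cannot produce a false hit. */
static bool worker_matches(const struct worker *w, const struct work_struct *work)
{
	return w->current_work == work && w->current_func == work->func;
}

static void fn_a(void *arg) { (void)arg; }
static void fn_b(void *arg) { (void)arg; }

int main(void)
{
	struct work_struct item = { fn_a };
	struct worker w = { &item, fn_a };

	assert(worker_matches(&w, &item));

	/* Same address reused for a different work item: no false positive. */
	item.func = fn_b;
	assert(!worker_matches(&w, &item));
	return 0;
}
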
1327 struct worker *worker; in is_chained_work() local
1329 worker = current_wq_worker(); in is_chained_work()
1334 return worker && worker->current_pwq->wq == wq; in is_chained_work()
1376 struct worker *worker; in __queue_work() local
1380 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1382 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
1383 pwq = worker->current_pwq; in __queue_work()
1590 static void worker_enter_idle(struct worker *worker) in worker_enter_idle() argument
1592 struct worker_pool *pool = worker->pool; in worker_enter_idle()
1594 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || in worker_enter_idle()
1595 WARN_ON_ONCE(!list_empty(&worker->entry) && in worker_enter_idle()
1596 (worker->hentry.next || worker->hentry.pprev))) in worker_enter_idle()
1600 worker->flags |= WORKER_IDLE; in worker_enter_idle()
1602 worker->last_active = jiffies; in worker_enter_idle()
1605 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1630 static void worker_leave_idle(struct worker *worker) in worker_leave_idle() argument
1632 struct worker_pool *pool = worker->pool; in worker_leave_idle()
1634 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) in worker_leave_idle()
1636 worker_clr_flags(worker, WORKER_IDLE); in worker_leave_idle()
1638 list_del_init(&worker->entry); in worker_leave_idle()
1641 static struct worker *alloc_worker(int node) in alloc_worker()
1643 struct worker *worker; in alloc_worker() local
1645 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
1646 if (worker) { in alloc_worker()
1647 INIT_LIST_HEAD(&worker->entry); in alloc_worker()
1648 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
1649 INIT_LIST_HEAD(&worker->node); in alloc_worker()
1651 worker->flags = WORKER_PREP; in alloc_worker()
1653 return worker; in alloc_worker()
1665 static void worker_attach_to_pool(struct worker *worker, in worker_attach_to_pool() argument
1674 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); in worker_attach_to_pool()
1682 worker->flags |= WORKER_UNBOUND; in worker_attach_to_pool()
1684 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
1698 static void worker_detach_from_pool(struct worker *worker, in worker_detach_from_pool() argument
1704 list_del(&worker->node); in worker_detach_from_pool()
1710 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); in worker_detach_from_pool()
1728 static struct worker *create_worker(struct worker_pool *pool) in create_worker()
1730 struct worker *worker = NULL; in create_worker() local
1739 worker = alloc_worker(pool->node); in create_worker()
1740 if (!worker) in create_worker()
1743 worker->pool = pool; in create_worker()
1744 worker->id = id; in create_worker()
1752 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, in create_worker()
1754 if (IS_ERR(worker->task)) in create_worker()
1757 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
1758 kthread_bind_mask(worker->task, pool->attrs->cpumask); in create_worker()
1761 worker_attach_to_pool(worker, pool); in create_worker()
1765 worker->pool->nr_workers++; in create_worker()
1766 worker_enter_idle(worker); in create_worker()
1767 wake_up_process(worker->task); in create_worker()
1770 return worker; in create_worker()
1775 kfree(worker); in create_worker()
1789 static void destroy_worker(struct worker *worker) in destroy_worker() argument
1791 struct worker_pool *pool = worker->pool; in destroy_worker()
1796 if (WARN_ON(worker->current_work) || in destroy_worker()
1797 WARN_ON(!list_empty(&worker->scheduled)) || in destroy_worker()
1798 WARN_ON(!(worker->flags & WORKER_IDLE))) in destroy_worker()
1804 list_del_init(&worker->entry); in destroy_worker()
1805 worker->flags |= WORKER_DIE; in destroy_worker()
1806 wake_up_process(worker->task); in destroy_worker()
1816 struct worker *worker; in idle_worker_timeout() local
1820 worker = list_entry(pool->idle_list.prev, struct worker, entry); in idle_worker_timeout()
1821 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_worker_timeout()
1828 destroy_worker(worker); in idle_worker_timeout()
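
idle_worker_timeout() (1816-1828) reaps surplus workers. Because worker_enter_idle() adds at the head of ->idle_list (1605), the tail entry taken at 1820 is the worker idle the longest; it is destroyed once last_active is more than IDLE_WORKER_TIMEOUT in the past. A wall-clock sketch of the expiry test, assuming the kernel's roughly five-minute timeout (the real code compares jiffies):

#include <assert.h>
#include <stdbool.h>
#include <time.h>

#define IDLE_WORKER_TIMEOUT 300	/* seconds; illustrative, ~the kernel's 5 min */

struct worker { time_t last_active; };

/* idle_worker_timeout() computes expires = last_active + IDLE_WORKER_TIMEOUT
 * and keeps the worker while the current time is before that. */
static bool keep_idle(const struct worker *w, time_t now)
{
	return now < w->last_active + IDLE_WORKER_TIMEOUT;
}

int main(void)
{
	time_t now = time(NULL);
	struct worker fresh = { .last_active = now };
	struct worker stale = { .last_active = now - 2 * IDLE_WORKER_TIMEOUT };

	assert(keep_idle(&fresh, now));
	assert(!keep_idle(&stale, now));	/* candidate for destroy_worker() */
	return 0;
}
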
1953 static bool manage_workers(struct worker *worker) in manage_workers() argument
1955 struct worker_pool *pool = worker->pool; in manage_workers()
1961 pool->manager = worker; in manage_workers()
1985 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
1990 struct worker_pool *pool = worker->pool; in process_one_work()
1993 struct worker *collision; in process_one_work()
2024 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2025 worker->current_work = work; in process_one_work()
2026 worker->current_func = work->func; in process_one_work()
2027 worker->current_pwq = pwq; in process_one_work()
2039 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2064 worker->current_func(work); in process_one_work()
2077 worker->current_func); in process_one_work()
2096 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
2099 hash_del(&worker->hentry); in process_one_work()
2100 worker->current_work = NULL; in process_one_work()
2101 worker->current_func = NULL; in process_one_work()
2102 worker->current_pwq = NULL; in process_one_work()
2103 worker->desc_valid = false; in process_one_work()
2119 static void process_scheduled_works(struct worker *worker) in process_scheduled_works() argument
2121 while (!list_empty(&worker->scheduled)) { in process_scheduled_works()
2122 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works()
2124 process_one_work(worker, work); in process_scheduled_works()
2142 struct worker *worker = __worker; in worker_thread() local
2143 struct worker_pool *pool = worker->pool; in worker_thread()
2146 worker->task->flags |= PF_WQ_WORKER; in worker_thread()
2151 if (unlikely(worker->flags & WORKER_DIE)) { in worker_thread()
2153 WARN_ON_ONCE(!list_empty(&worker->entry)); in worker_thread()
2154 worker->task->flags &= ~PF_WQ_WORKER; in worker_thread()
2156 set_task_comm(worker->task, "kworker/dying"); in worker_thread()
2157 ida_simple_remove(&pool->worker_ida, worker->id); in worker_thread()
2158 worker_detach_from_pool(worker, pool); in worker_thread()
2159 kfree(worker); in worker_thread()
2163 worker_leave_idle(worker); in worker_thread()
2170 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
2178 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in worker_thread()
2187 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in worker_thread()
2196 process_one_work(worker, work); in worker_thread()
2197 if (unlikely(!list_empty(&worker->scheduled))) in worker_thread()
2198 process_scheduled_works(worker); in worker_thread()
2200 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2201 process_scheduled_works(worker); in worker_thread()
2205 worker_set_flags(worker, WORKER_PREP); in worker_thread()
2214 worker_enter_idle(worker); in worker_thread()
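
worker_thread() (2142-2214) is the skeleton the rest of the listing hangs off: check WORKER_DIE and self-destruct, leave idle, manage the pool if it is short of workers, drain work items, then re-enter idle and sleep. Below is a loose pthread analogue of that wait/drain/die loop for one hypothetical worker, with no pools, flags, or concurrency management:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical one-worker "pool": a counter of pending dummy items. */
struct pool {
	pthread_mutex_t lock;
	pthread_cond_t	more_work;
	int		pending;
	bool		die;
};

static void *worker_thread(void *arg)
{
	struct pool *pool = arg;

	pthread_mutex_lock(&pool->lock);
	for (;;) {
		while (pool->pending > 0) {	/* the process-works loop */
			pool->pending--;
			pthread_mutex_unlock(&pool->lock);
			puts("processing one work item");
			pthread_mutex_lock(&pool->lock);
		}
		if (pool->die)			/* WORKER_DIE analogue */
			break;
		/* worker_enter_idle() + schedule() analogue */
		pthread_cond_wait(&pool->more_work, &pool->lock);
	}
	pthread_mutex_unlock(&pool->lock);
	return NULL;
}

int main(void)
{
	struct pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.more_work = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker_thread, &pool);

	pthread_mutex_lock(&pool.lock);
	pool.pending = 3;			/* __queue_work() analogue */
	pthread_cond_signal(&pool.more_work);	/* wake_up_worker() analogue */
	pthread_mutex_unlock(&pool.lock);

	sleep(1);				/* crude: let the worker drain */

	pthread_mutex_lock(&pool.lock);
	pool.die = true;			/* destroy_worker() analogue */
	pthread_cond_signal(&pool.more_work);
	pthread_mutex_unlock(&pool.lock);

	pthread_join(tid, NULL);
	return 0;
}
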
2244 struct worker *rescuer = __rescuer; in rescuer_thread()
2397 struct work_struct *target, struct worker *worker) in insert_wq_barrier() argument
2417 if (worker) in insert_wq_barrier()
2418 head = worker->scheduled.next; in insert_wq_barrier()
2714 struct worker *worker = NULL; in start_flush_work() local
2734 worker = find_worker_executing_work(pool, work); in start_flush_work()
2735 if (!worker) in start_flush_work()
2737 pwq = worker->current_pwq; in start_flush_work()
2740 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
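
start_flush_work() (2714-2740) covers both states a flusher can find the work in: still queued on a pwq, or already being executed by a worker, in which case insert_wq_barrier() (2397-2418) splices a barrier work onto that worker's ->scheduled list (2417-2418) so it runs immediately after the current item. The barrier is just a work whose function signals a completion the flusher sleeps on; the kernel uses struct completion, and the pthread handshake below is a userspace sketch of the same idea with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

/* Userspace analogue of the wq_barrier completion. */
struct wq_barrier {
	pthread_mutex_t lock;
	pthread_cond_t	done_cond;
	bool		done;
};

/* Runs in the worker, right after the flushed work item finishes. */
static void wq_barrier_func(struct wq_barrier *barr)
{
	pthread_mutex_lock(&barr->lock);
	barr->done = true;
	pthread_cond_signal(&barr->done_cond);
	pthread_mutex_unlock(&barr->lock);
}

/* Runs in the flusher: sleeping here until the barrier has executed
 * proves the target work, queued ahead of it, has completed. */
static void wait_for_barrier(struct wq_barrier *barr)
{
	pthread_mutex_lock(&barr->lock);
	while (!barr->done)
		pthread_cond_wait(&barr->done_cond, &barr->lock);
	pthread_mutex_unlock(&barr->lock);
}

static void *worker(void *arg)
{
	/* ...the flushed work item would run here... */
	wq_barrier_func(arg);
	return NULL;
}

int main(void)
{
	struct wq_barrier barr = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done_cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &barr);
	wait_for_barrier(&barr);	/* flush_work() returns only after this */
	pthread_join(tid, NULL);
	return 0;
}
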
3174 struct worker *worker; in put_unbound_pool() local
3201 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
3202 destroy_worker(worker); in put_unbound_pool()
3910 struct worker *rescuer; in __alloc_workqueue_key()
3982 struct worker *rescuer = wq->rescuer; in destroy_workqueue()
4094 struct worker *worker = current_wq_worker(); in current_work() local
4096 return worker ? worker->current_work : NULL; in current_work()
4110 struct worker *worker = current_wq_worker(); in current_is_workqueue_rescuer() local
4112 return worker && worker->rescue_wq; in current_is_workqueue_rescuer()
4201 struct worker *worker = current_wq_worker(); in set_worker_desc() local
4204 if (worker) { in set_worker_desc()
4206 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); in set_worker_desc()
4208 worker->desc_valid = true; in set_worker_desc()
4233 struct worker *worker; in print_worker_info() local
4242 worker = probe_kthread_data(task); in print_worker_info()
4248 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); in print_worker_info()
4249 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4254 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); in print_worker_info()
4256 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); in print_worker_info()
4292 struct worker *worker; in show_pwq() local
4303 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4304 if (worker->current_pwq == pwq) { in show_pwq()
4313 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
4314 if (worker->current_pwq != pwq) in show_pwq()
4318 task_pid_nr(worker->task), in show_pwq()
4319 worker == pwq->wq->rescuer ? "(RESCUER)" : "", in show_pwq()
4320 worker->current_func); in show_pwq()
4321 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4401 struct worker *worker; in show_workqueue_state() local
4414 list_for_each_entry(worker, &pool->idle_list, entry) { in show_workqueue_state()
4416 task_pid_nr(worker->task)); in show_workqueue_state()
4446 struct worker *worker; in wq_unbind_fn() local
4459 for_each_pool_worker(worker, pool) in wq_unbind_fn()
4460 worker->flags |= WORKER_UNBOUND; in wq_unbind_fn()
4504 struct worker *worker; in rebind_workers() local
4515 for_each_pool_worker(worker, pool) in rebind_workers()
4516 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in rebind_workers()
4533 for_each_pool_worker(worker, pool) { in rebind_workers()
4534 unsigned int worker_flags = worker->flags; in rebind_workers()
4545 wake_up_process(worker->task); in rebind_workers()
4565 ACCESS_ONCE(worker->flags) = worker_flags; in rebind_workers()
4584 struct worker *worker; in restore_unbound_workers_cpumask() local
4598 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
4599 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in restore_unbound_workers_cpumask()