Lines Matching refs:pwq

427 #define for_each_pwq(pwq, wq)						\  argument
428 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
624 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
627 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
1090 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1092 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1093 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1094 pwq->refcnt++; in get_pwq()
1104 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1106 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1107 if (likely(--pwq->refcnt)) in put_pwq()
1109 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1119 schedule_work(&pwq->unbound_release_work); in put_pwq()
1128 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1130 if (pwq) { in put_pwq_unlocked()
1135 spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1136 put_pwq(pwq); in put_pwq_unlocked()
1137 spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1143 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work() local
1146 if (list_empty(&pwq->pool->worklist)) in pwq_activate_delayed_work()
1147 pwq->pool->watchdog_ts = jiffies; in pwq_activate_delayed_work()
1148 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1150 pwq->nr_active++; in pwq_activate_delayed_work()
1153 static void pwq_activate_first_delayed(struct pool_workqueue *pwq) in pwq_activate_first_delayed() argument
1155 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed()
1172 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) in pwq_dec_nr_in_flight() argument
1178 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1180 pwq->nr_active--; in pwq_dec_nr_in_flight()
1181 if (!list_empty(&pwq->delayed_works)) { in pwq_dec_nr_in_flight()
1183 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1184 pwq_activate_first_delayed(pwq); in pwq_dec_nr_in_flight()
1188 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1192 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1196 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1202 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1203 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1205 put_pwq(pwq); in pwq_dec_nr_in_flight()
1239 struct pool_workqueue *pwq; in try_to_grab_pending() local
1278 pwq = get_work_pwq(work); in try_to_grab_pending()
1279 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1293 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1325 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1328 struct worker_pool *pool = pwq->pool; in insert_work()
1331 set_work_pwq(work, pwq, extra_flags); in insert_work()
1333 get_pwq(pwq); in insert_work()
1398 struct pool_workqueue *pwq; in __queue_work() local
1425 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1427 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1435 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1443 pwq = worker->current_pwq; in __queue_work()
1447 spin_lock(&pwq->pool->lock); in __queue_work()
1450 spin_lock(&pwq->pool->lock); in __queue_work()
1461 if (unlikely(!pwq->refcnt)) { in __queue_work()
1463 spin_unlock(&pwq->pool->lock); in __queue_work()
1473 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1478 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1479 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1481 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1483 pwq->nr_active++; in __queue_work()
1484 worklist = &pwq->pool->worklist; in __queue_work()
1486 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1489 worklist = &pwq->delayed_works; in __queue_work()
1492 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1495 spin_unlock(&pwq->pool->lock); in __queue_work()
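
The __queue_work() hits above show the pwq being selected (per-CPU or unbound
by NUMA node), the per-color in-flight accounting, and the choice between the
pool's worklist and the pwq's delayed_works list depending on max_active. From
a caller's side this path is reached through the normal queueing API; a short,
hedged usage sketch (hypothetical names, assuming only the standard
<linux/workqueue.h> interface):

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    /* Hypothetical work item and handler, for illustration only. */
    static void example_fn(struct work_struct *work)
    {
            pr_info("example work ran\n");
    }
    static DECLARE_WORK(example_work, example_fn);

    static bool example_submit(void)
    {
            /* queue_work() ends up in __queue_work(), which binds the
             * item to a pwq as the lines above show. */
            return queue_work(system_wq, &example_work);
    }
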
2014 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2015 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2023 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2029 get_pwq(pwq); in send_mayday()
2030 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2167 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2169 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2205 worker->current_pwq = pwq; in process_one_work()
2212 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2245 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2277 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2312 pwq_dec_nr_in_flight(pwq, work_color); in process_one_work()
2493 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2495 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2500 list_del_init(&pwq->mayday_node); in rescuer_thread()
2514 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2540 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2541 get_pwq(pwq); in rescuer_thread()
2542 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2552 put_pwq(pwq); in rescuer_thread()
2651 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2687 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2726 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2733 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2734 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2739 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2741 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2742 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2749 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2750 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
2934 struct pool_workqueue *pwq; in drain_workqueue() local
2950 for_each_pwq(pwq, wq) { in drain_workqueue()
2953 spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2954 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); in drain_workqueue()
2955 spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
2980 struct pool_workqueue *pwq; in start_flush_work() local
2993 pwq = get_work_pwq(work); in start_flush_work()
2994 if (pwq) { in start_flush_work()
2995 if (unlikely(pwq->pool != pool)) in start_flush_work()
3001 pwq = worker->current_pwq; in start_flush_work()
3004 check_flush_dependency(pwq->wq, work); in start_flush_work()
3006 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3019 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3020 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3021 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
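
The insert_wq_barrier() and start_flush_work() hits show how flushing a single
work item plants a barrier work on the same pwq and waits for it to complete.
A hedged caller-side sketch (the work item is hypothetical and assumed to have
been queued elsewhere):

    #include <linux/workqueue.h>

    extern struct work_struct example_work;   /* hypothetical, queued elsewhere */

    static void example_sync(void)
    {
            /* flush_work() goes through start_flush_work(), which inserts
             * a barrier on the item's pwq and waits for completion. */
            flush_work(&example_work);
    }
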
3657 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3659 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3660 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3667 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3675 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3695 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3697 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3705 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3709 spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3717 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3719 while (!list_empty(&pwq->delayed_works) && in pwq_adjust_max_active()
3720 pwq->nr_active < pwq->max_active) in pwq_adjust_max_active()
3721 pwq_activate_first_delayed(pwq); in pwq_adjust_max_active()
3727 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3729 pwq->max_active = 0; in pwq_adjust_max_active()
3732 spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3736 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3739 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3741 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3743 pwq->pool = pool; in init_pwq()
3744 pwq->wq = wq; in init_pwq()
3745 pwq->flush_color = -1; in init_pwq()
3746 pwq->refcnt = 1; in init_pwq()
3747 INIT_LIST_HEAD(&pwq->delayed_works); in init_pwq()
3748 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3749 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3750 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3754 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3756 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3761 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3765 pwq->work_color = wq->work_color; in link_pwq()
3768 pwq_adjust_max_active(pwq); in link_pwq()
3771 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3779 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3787 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3788 if (!pwq) { in alloc_unbound_pwq()
3793 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3794 return pwq; in alloc_unbound_pwq()
3852 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3860 link_pwq(pwq); in numa_pwq_tbl_install()
3863 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4085 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4104 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4113 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4120 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4121 if (!pwq) { in wq_update_unbound_numa()
4129 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4154 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4159 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4162 link_pwq(pwq); in alloc_and_link_pwqs()
4234 struct pool_workqueue *pwq; in alloc_workqueue() local
4301 for_each_pwq(pwq, wq) in alloc_workqueue()
4302 pwq_adjust_max_active(pwq); in alloc_workqueue()
4332 struct pool_workqueue *pwq; in destroy_workqueue() local
4360 for_each_pwq(pwq, wq) { in destroy_workqueue()
4364 if (WARN_ON(pwq->nr_in_flight[i])) { in destroy_workqueue()
4371 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || in destroy_workqueue()
4372 WARN_ON(pwq->nr_active) || in destroy_workqueue()
4373 WARN_ON(!list_empty(&pwq->delayed_works))) { in destroy_workqueue()
4403 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4405 put_pwq_unlocked(pwq); in destroy_workqueue()
4412 pwq = wq->dfl_pwq; in destroy_workqueue()
4414 put_pwq_unlocked(pwq); in destroy_workqueue()
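
The destroy_workqueue() hits above sanity-check every pwq (no in-flight work,
no active work, empty delayed_works) and then drop the NUMA-table and default
pwq references via put_pwq_unlocked(). Together with the alloc_workqueue()
hits this frames the workqueue lifetime; a hedged lifecycle sketch using the
public API (hypothetical names, flags chosen only for illustration):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *example_wq;

    static int example_init(void)
    {
            /* alloc_workqueue() sets up the pwqs iterated by for_each_pwq(). */
            example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 1);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void example_exit(void)
    {
            /* Flushes remaining work and verifies pwq state before freeing. */
            destroy_workqueue(example_wq);
    }
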
4431 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4444 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4445 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4502 struct pool_workqueue *pwq; in workqueue_congested() local
4512 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4514 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4516 ret = !list_empty(&pwq->delayed_works); in workqueue_congested()
4599 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4617 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4618 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4652 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4654 struct worker_pool *pool = pwq->pool; in show_pwq()
4664 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4665 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4668 if (worker->current_pwq == pwq) { in show_pwq()
4678 if (worker->current_pwq != pwq) in show_pwq()
4683 worker == pwq->wq->rescuer ? "(RESCUER)" : "", in show_pwq()
4693 if (get_work_pwq(work) == pwq) { in show_pwq()
4703 if (get_work_pwq(work) != pwq) in show_pwq()
4712 if (!list_empty(&pwq->delayed_works)) { in show_pwq()
4716 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4742 struct pool_workqueue *pwq; in show_workqueue_state() local
4745 for_each_pwq(pwq, wq) { in show_workqueue_state()
4746 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { in show_workqueue_state()
4756 for_each_pwq(pwq, wq) { in show_workqueue_state()
4757 spin_lock_irqsave(&pwq->pool->lock, flags); in show_workqueue_state()
4758 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) in show_workqueue_state()
4759 show_pwq(pwq); in show_workqueue_state()
4760 spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_workqueue_state()
5142 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5151 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5152 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5176 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5190 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5191 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5192 if (pwq->nr_active) { in freeze_workqueues_busy()
5217 struct pool_workqueue *pwq; in thaw_workqueues() local
5229 for_each_pwq(pwq, wq) in thaw_workqueues()
5230 pwq_adjust_max_active(pwq); in thaw_workqueues()