Lines matching refs: pwq (kernel/workqueue.c)

382 static void show_pwq(struct pool_workqueue *pwq);
451 #define for_each_pwq(pwq, wq) \ argument
452 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
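
The for_each_pwq() macro defined at 451-452 walks every pool_workqueue a workqueue has linked onto wq->pwqs through the pwqs_node member, as an RCU-protected list walk. Below is a minimal userspace sketch of the same intrusive-list iteration; the struct layouts, the for_each_pwq_sketch name, and the helpers are simplified stand-ins, and the RCU and wq->mutex lockdep checks of the real macro are omitted.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* trimmed stand-ins for the kernel structs, not the real layouts */
struct pool_workqueue {
    int nr_active;
    struct list_head pwqs_node;    /* link on wq->pwqs */
};

struct workqueue_struct {
    struct list_head pwqs;         /* all pwqs of this workqueue */
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* plain list walk standing in for the RCU-protected for_each_pwq() */
#define for_each_pwq_sketch(pwq, wq)                                   \
    for ((pwq) = container_of((wq)->pwqs.next,                         \
                              struct pool_workqueue, pwqs_node);       \
         &(pwq)->pwqs_node != &(wq)->pwqs;                             \
         (pwq) = container_of((pwq)->pwqs_node.next,                   \
                              struct pool_workqueue, pwqs_node))

static void list_add_tail_sketch(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

int main(void)
{
    struct workqueue_struct wq;
    struct pool_workqueue a = { .nr_active = 1 };
    struct pool_workqueue b = { .nr_active = 0 };
    struct pool_workqueue *pwq;

    wq.pwqs.next = wq.pwqs.prev = &wq.pwqs;
    list_add_tail_sketch(&a.pwqs_node, &wq.pwqs);
    list_add_tail_sketch(&b.pwqs_node, &wq.pwqs);

    for_each_pwq_sketch(pwq, &wq)
        printf("pwq with nr_active=%d\n", pwq->nr_active);
    return 0;
}
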
646 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
649 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
1120 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1122 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1123 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1124 pwq->refcnt++; in get_pwq()
1134 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1136 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1137 if (likely(--pwq->refcnt)) in put_pwq()
1139 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1149 schedule_work(&pwq->unbound_release_work); in put_pwq()
1158 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1160 if (pwq) { in put_pwq_unlocked()
1165 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1166 put_pwq(pwq); in put_pwq_unlocked()
1167 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
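
The references at 1120-1167 show the pwq reference-counting contract: get_pwq() and put_pwq() may only run with the owning pool's lock held, the final put hands the pwq off to deferred release work, and put_pwq_unlocked() wraps the lock for callers that do not already hold it. A minimal userspace sketch of that pattern follows, assuming a pthread mutex in place of pool->lock, a plain callback in place of the scheduled unbound release work (the WQ_UNBOUND-only check is dropped), and trimmed hypothetical struct pool / struct pwq types.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct pool { pthread_mutex_t lock; };
struct pwq  { struct pool *pool; int refcnt; };

static void release_pwq(struct pwq *pwq)           /* stands in for the */
{                                                  /* unbound release work */
    printf("releasing pwq %p\n", (void *)pwq);
}

static void get_pwq(struct pwq *pwq)
{
    /* caller must hold pwq->pool->lock */
    assert(pwq->refcnt > 0);
    pwq->refcnt++;
}

static void put_pwq(struct pwq *pwq)
{
    /* caller must hold pwq->pool->lock */
    if (--pwq->refcnt)
        return;
    release_pwq(pwq);                              /* last reference gone */
}

static void put_pwq_unlocked(struct pwq *pwq)
{
    if (pwq) {
        pthread_mutex_lock(&pwq->pool->lock);
        put_pwq(pwq);
        pthread_mutex_unlock(&pwq->pool->lock);
    }
}

int main(void)
{
    struct pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct pwq  pwq  = { .pool = &pool, .refcnt = 1 };

    pthread_mutex_lock(&pool.lock);
    get_pwq(&pwq);                                 /* refcnt 1 -> 2 */
    put_pwq(&pwq);                                 /* refcnt 2 -> 1 */
    pthread_mutex_unlock(&pool.lock);

    put_pwq_unlocked(&pwq);                        /* drops the initial ref */
    return 0;
}
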
1173 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work() local
1176 if (list_empty(&pwq->pool->worklist)) in pwq_activate_inactive_work()
1177 pwq->pool->watchdog_ts = jiffies; in pwq_activate_inactive_work()
1178 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1180 pwq->nr_active++; in pwq_activate_inactive_work()
1183 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) in pwq_activate_first_inactive() argument
1185 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive()
1202 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) in pwq_dec_nr_in_flight() argument
1207 pwq->nr_active--; in pwq_dec_nr_in_flight()
1208 if (!list_empty(&pwq->inactive_works)) { in pwq_dec_nr_in_flight()
1210 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1211 pwq_activate_first_inactive(pwq); in pwq_dec_nr_in_flight()
1215 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1218 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1222 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1226 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1232 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1233 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1235 put_pwq(pwq); in pwq_dec_nr_in_flight()
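
Lines 1173-1235 are the completion-side bookkeeping: when a work item finishes, pwq_dec_nr_in_flight() drops nr_active and, if the pwq fell below max_active, pwq_activate_first_inactive() promotes the oldest deferred item onto the pool's worklist. A simplified single-threaded sketch of that promotion logic, using a hypothetical struct pwq_model and leaving flush colors out:

#include <stdio.h>

#define MAX_QUEUED 8

struct pwq_model {
    int nr_active;
    int max_active;
    int inactive[MAX_QUEUED];   /* FIFO of deferred work ids */
    int nr_inactive;
};

static void activate_first_inactive(struct pwq_model *pwq)
{
    int id = pwq->inactive[0];

    for (int i = 1; i < pwq->nr_inactive; i++)     /* pop the front entry */
        pwq->inactive[i - 1] = pwq->inactive[i];
    pwq->nr_inactive--;
    pwq->nr_active++;
    printf("activated deferred work %d\n", id);
}

static void dec_nr_in_flight(struct pwq_model *pwq)
{
    pwq->nr_active--;
    if (pwq->nr_inactive && pwq->nr_active < pwq->max_active)
        activate_first_inactive(pwq);
}

int main(void)
{
    struct pwq_model pwq = {
        .nr_active = 2, .max_active = 2,
        .inactive = { 41, 42 }, .nr_inactive = 2,
    };

    dec_nr_in_flight(&pwq);    /* one item finishes, work 41 is activated */
    dec_nr_in_flight(&pwq);    /* another finishes, work 42 is activated */
    printf("nr_active=%d nr_inactive=%d\n", pwq.nr_active, pwq.nr_inactive);
    return 0;
}
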
1272 struct pool_workqueue *pwq; in try_to_grab_pending() local
1311 pwq = get_work_pwq(work); in try_to_grab_pending()
1312 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1330 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1362 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1365 struct worker_pool *pool = pwq->pool; in insert_work()
1371 set_work_pwq(work, pwq, extra_flags); in insert_work()
1373 get_pwq(pwq); in insert_work()
1431 struct pool_workqueue *pwq; in __queue_work() local
1456 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1460 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1469 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1477 pwq = worker->current_pwq; in __queue_work()
1481 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1484 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1495 if (unlikely(!pwq->refcnt)) { in __queue_work()
1497 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1507 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1512 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1513 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1515 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1517 pwq->nr_active++; in __queue_work()
1518 worklist = &pwq->pool->worklist; in __queue_work()
1520 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1523 worklist = &pwq->inactive_works; in __queue_work()
1527 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1530 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
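
The __queue_work() references around 1512-1527 show the enqueue-side counterpart: a newly queued item charges nr_in_flight for the current work color and then goes either straight onto the pool's worklist (while nr_active is still below max_active) or onto the pwq's inactive_works list. A small sketch of just that decision, with hypothetical stand-in types and ignoring colors, CPU selection, and locking:

#include <stdio.h>
#include <stdbool.h>

struct pwq_model {
    int nr_active;
    int max_active;
    int nr_inactive;
};

/* returns true if the item is immediately eligible to run */
static bool queue_work_model(struct pwq_model *pwq, int work_id)
{
    if (pwq->nr_active < pwq->max_active) {
        pwq->nr_active++;
        printf("work %d queued on pool worklist\n", work_id);
        return true;
    }
    pwq->nr_inactive++;
    printf("work %d deferred to inactive_works\n", work_id);
    return false;
}

int main(void)
{
    struct pwq_model pwq = { .nr_active = 0, .max_active = 1 };

    queue_work_model(&pwq, 1);   /* runs immediately: below max_active */
    queue_work_model(&pwq, 2);   /* deferred: pwq already at max_active */
    return 0;
}
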
2042 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2043 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2051 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2057 get_pwq(pwq); in send_mayday()
2058 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2195 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2197 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2233 worker->current_pwq = pwq; in process_one_work()
2241 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2274 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2306 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2342 pwq_dec_nr_in_flight(pwq, work_data); in process_one_work()
2523 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2525 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2530 list_del_init(&pwq->mayday_node); in rescuer_thread()
2544 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2564 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2570 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2571 get_pwq(pwq); in rescuer_thread()
2572 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2582 put_pwq(pwq); in rescuer_thread()
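
The send_mayday() and rescuer_thread() references (2042-2058, 2523-2582) outline the rescuer handshake: a starved pwq is added to its workqueue's maydays list with an extra reference taken via get_pwq(), the rescuer pulls it off, processes matching work, re-arms the mayday if work remains, and finally drops the list's reference with put_pwq(). The following is a loose single-threaded sketch of that flow; the types, the budget parameter, and the helper names are hypothetical simplifications, not the kernel's.

#include <stdio.h>
#include <stdbool.h>

#define MAX_MAYDAYS 4

struct pwq_model {
    int refcnt;
    int pending_work;        /* items the rescuer still has to run */
    bool on_mayday_list;
};

struct wq_model {
    struct pwq_model *maydays[MAX_MAYDAYS];
    int nr_maydays;
};

static void send_mayday(struct wq_model *wq, struct pwq_model *pwq)
{
    if (pwq->on_mayday_list)
        return;                       /* already waiting for the rescuer */
    pwq->refcnt++;                    /* reference held by the mayday list */
    pwq->on_mayday_list = true;
    wq->maydays[wq->nr_maydays++] = pwq;
}

static void rescuer_step(struct wq_model *wq, int budget)
{
    while (wq->nr_maydays && budget > 0) {
        struct pwq_model *pwq = wq->maydays[--wq->nr_maydays];

        pwq->on_mayday_list = false;
        while (pwq->pending_work && budget > 0) {
            pwq->pending_work--;      /* process_one_work() stand-in */
            budget--;
        }

        if (pwq->pending_work)        /* still starved: re-arm the mayday */
            send_mayday(wq, pwq);

        pwq->refcnt--;                /* drop the mayday list reference */
    }
}

int main(void)
{
    struct wq_model wq = { 0 };
    struct pwq_model pwq = { .refcnt = 1, .pending_work = 3 };

    send_mayday(&wq, &pwq);
    rescuer_step(&wq, 2);             /* only gets through two items */
    printf("pending=%d refcnt=%d mayday=%d\n",
           pwq.pending_work, pwq.refcnt, pwq.on_mayday_list);

    rescuer_step(&wq, 2);             /* second pass drains the rest */
    printf("pending=%d refcnt=%d mayday=%d\n",
           pwq.pending_work, pwq.refcnt, pwq.on_mayday_list);
    return 0;
}
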
2681 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2722 pwq->nr_in_flight[work_color]++; in insert_wq_barrier()
2726 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
2764 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2771 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2772 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2777 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2779 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2780 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2787 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2788 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
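
flush_workqueue_prep_pwqs() (2764-2788) and the tail of pwq_dec_nr_in_flight() (1218-1235) together implement the flush handshake: the flusher marks every pwq that still has in-flight work of the flush color and counts them in nr_pwqs_to_flush, and each pwq clears its mark as that color drains, completing the first flusher once the count reaches zero. A compact single-threaded model of that handshake, with hypothetical stand-in types and no wq->mutex or completion objects:

#include <stdio.h>
#include <stdbool.h>

#define WORK_NR_COLORS 16
#define NR_PWQS 2

struct pwq_model {
    int nr_in_flight[WORK_NR_COLORS];
    int flush_color;                 /* -1 while not being flushed */
};

struct wq_model {
    struct pwq_model pwqs[NR_PWQS];
    int nr_pwqs_to_flush;
    bool flush_done;
};

/* flush side: mark every pwq that still has work of @flush_color */
static bool prep_pwqs(struct wq_model *wq, int flush_color)
{
    bool wait = false;

    for (int i = 0; i < NR_PWQS; i++) {
        struct pwq_model *pwq = &wq->pwqs[i];

        if (pwq->nr_in_flight[flush_color]) {
            pwq->flush_color = flush_color;
            wq->nr_pwqs_to_flush++;
            wait = true;
        }
    }
    return wait;
}

/* completion side: one work item of @color on pwq @i has finished */
static void dec_nr_in_flight(struct wq_model *wq, int i, int color)
{
    struct pwq_model *pwq = &wq->pwqs[i];

    pwq->nr_in_flight[color]--;
    if (pwq->flush_color != color || pwq->nr_in_flight[color])
        return;
    pwq->flush_color = -1;
    if (--wq->nr_pwqs_to_flush == 0)
        wq->flush_done = true;       /* stands in for complete() */
}

int main(void)
{
    struct wq_model wq = { .pwqs = { { .flush_color = -1 },
                                     { .flush_color = -1 } } };

    wq.pwqs[0].nr_in_flight[0] = 2;  /* two color-0 items on pwq 0 */

    if (prep_pwqs(&wq, 0))
        printf("flusher must wait for %d pwq(s)\n", wq.nr_pwqs_to_flush);

    dec_nr_in_flight(&wq, 0, 0);
    dec_nr_in_flight(&wq, 0, 0);
    printf("flush done: %s\n", wq.flush_done ? "yes" : "no");
    return 0;
}
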
2972 struct pool_workqueue *pwq; in drain_workqueue() local
2988 for_each_pwq(pwq, wq) { in drain_workqueue()
2991 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
2992 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); in drain_workqueue()
2993 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
3018 struct pool_workqueue *pwq; in start_flush_work() local
3031 pwq = get_work_pwq(work); in start_flush_work()
3032 if (pwq) { in start_flush_work()
3033 if (unlikely(pwq->pool != pool)) in start_flush_work()
3039 pwq = worker->current_pwq; in start_flush_work()
3042 check_flush_dependency(pwq->wq, work); in start_flush_work()
3044 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3057 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3058 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3059 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
3714 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3716 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3717 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3724 if (!list_empty(&pwq->pwqs_node)) { in pwq_unbound_release_workfn()
3729 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3738 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3758 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3760 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3768 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3772 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3782 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3784 while (!list_empty(&pwq->inactive_works) && in pwq_adjust_max_active()
3785 pwq->nr_active < pwq->max_active) { in pwq_adjust_max_active()
3786 pwq_activate_first_inactive(pwq); in pwq_adjust_max_active()
3797 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3799 pwq->max_active = 0; in pwq_adjust_max_active()
3802 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
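
pwq_adjust_max_active() (3758-3802) is the freeze/thaw and max_active knob: a freezable workqueue's pwqs get max_active forced to 0 while the system is freezing, and otherwise max_active is restored from the workqueue's saved_max_active with deferred items activated until the limit is hit again. A small sketch of that adjustment, with trimmed stand-in fields and no locking (saved_max_active is kept on the pwq here purely for brevity; in the kernel it lives on the workqueue):

#include <stdio.h>
#include <stdbool.h>

struct pwq_model {
    int max_active;
    int nr_active;
    int nr_inactive;
    int saved_max_active;
    bool freezable;
};

static void adjust_max_active(struct pwq_model *pwq, bool freezing)
{
    if (pwq->freezable && freezing) {
        pwq->max_active = 0;                      /* hold everything back */
        return;
    }

    pwq->max_active = pwq->saved_max_active;
    while (pwq->nr_inactive && pwq->nr_active < pwq->max_active) {
        pwq->nr_inactive--;                       /* promote deferred work */
        pwq->nr_active++;
    }
}

int main(void)
{
    struct pwq_model pwq = {
        .max_active = 0, .nr_inactive = 3,
        .saved_max_active = 2, .freezable = true,
    };

    adjust_max_active(&pwq, false);   /* thaw: two items become active */
    printf("active=%d inactive=%d max=%d\n",
           pwq.nr_active, pwq.nr_inactive, pwq.max_active);

    adjust_max_active(&pwq, true);    /* freeze: cap new activations */
    printf("max_active while freezing = %d\n", pwq.max_active);
    return 0;
}
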
3806 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3809 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3811 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3813 pwq->pool = pool; in init_pwq()
3814 pwq->wq = wq; in init_pwq()
3815 pwq->flush_color = -1; in init_pwq()
3816 pwq->refcnt = 1; in init_pwq()
3817 INIT_LIST_HEAD(&pwq->inactive_works); in init_pwq()
3818 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3819 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3820 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3824 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3826 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3831 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3835 pwq->work_color = wq->work_color; in link_pwq()
3838 pwq_adjust_max_active(pwq); in link_pwq()
3841 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3849 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3857 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3858 if (!pwq) { in alloc_unbound_pwq()
3863 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3864 return pwq; in alloc_unbound_pwq()
3922 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
3930 link_pwq(pwq); in numa_pwq_tbl_install()
3933 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4157 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4176 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4185 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4192 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4193 if (!pwq) { in wq_update_unbound_numa()
4201 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4226 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4231 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4234 link_pwq(pwq); in alloc_and_link_pwqs()
4306 struct pool_workqueue *pwq; in alloc_workqueue() local
4373 for_each_pwq(pwq, wq) in alloc_workqueue()
4374 pwq_adjust_max_active(pwq); in alloc_workqueue()
4396 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
4401 if (pwq->nr_in_flight[i]) in pwq_busy()
4404 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4406 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) in pwq_busy()
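
pwq_busy() (4396-4406) is what destroy_workqueue() uses to warn before tearing down a workqueue that still has work: any non-zero in-flight color, any reference beyond the expected one (the default pwq legitimately holds an extra), or any active or deferred item makes the pwq busy. A minimal sketch of that predicate with hypothetical stand-in types:

#include <stdbool.h>
#include <stdio.h>

#define WORK_NR_COLORS 16

struct pwq_model {
    int nr_in_flight[WORK_NR_COLORS];
    int refcnt;
    int nr_active;
    int nr_inactive;
    bool is_dfl_pwq;         /* the default pwq keeps one extra ref */
};

static bool pwq_busy(const struct pwq_model *pwq)
{
    for (int i = 0; i < WORK_NR_COLORS; i++)
        if (pwq->nr_in_flight[i])
            return true;
    if (!pwq->is_dfl_pwq && pwq->refcnt > 1)
        return true;
    return pwq->nr_active || pwq->nr_inactive;
}

int main(void)
{
    struct pwq_model idle = { .refcnt = 1 };
    struct pwq_model busy = { .refcnt = 1, .nr_active = 1 };

    printf("idle busy? %d\n", pwq_busy(&idle));   /* 0 */
    printf("busy busy? %d\n", pwq_busy(&busy));   /* 1 */
    return 0;
}
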
4420 struct pool_workqueue *pwq; in destroy_workqueue() local
4452 for_each_pwq(pwq, wq) { in destroy_workqueue()
4453 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4454 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
4457 show_pwq(pwq); in destroy_workqueue()
4458 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4464 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4489 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4491 put_pwq_unlocked(pwq); in destroy_workqueue()
4498 pwq = wq->dfl_pwq; in destroy_workqueue()
4500 put_pwq_unlocked(pwq); in destroy_workqueue()
4517 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4530 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4531 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4588 struct pool_workqueue *pwq; in workqueue_congested() local
4598 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4600 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4602 ret = !list_empty(&pwq->inactive_works); in workqueue_congested()
4685 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4703 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4704 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4738 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4740 struct worker_pool *pool = pwq->pool; in show_pwq()
4750 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4751 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4754 if (worker->current_pwq == pwq) { in show_pwq()
4764 if (worker->current_pwq != pwq) in show_pwq()
4779 if (get_work_pwq(work) == pwq) { in show_pwq()
4789 if (get_work_pwq(work) != pwq) in show_pwq()
4798 if (!list_empty(&pwq->inactive_works)) { in show_pwq()
4802 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
4816 struct pool_workqueue *pwq; in show_one_workqueue() local
4820 for_each_pwq(pwq, wq) { in show_one_workqueue()
4821 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4831 for_each_pwq(pwq, wq) { in show_one_workqueue()
4832 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_one_workqueue()
4833 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4840 show_pwq(pwq); in show_one_workqueue()
4843 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_one_workqueue()
5257 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5266 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5267 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5291 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5305 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5306 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5307 if (pwq->nr_active) { in freeze_workqueues_busy()
5332 struct pool_workqueue *pwq; in thaw_workqueues() local
5344 for_each_pwq(pwq, wq) in thaw_workqueues()
5345 pwq_adjust_max_active(pwq); in thaw_workqueues()