Lines matching references to wq in kernel/workqueue.c

207 	struct workqueue_struct *wq;		/* I: the owning workqueue */  member
381 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
396 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ argument
398 !lockdep_is_held(&wq->mutex) && \
451 #define for_each_pwq(pwq, wq) \ argument
452 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
453 lockdep_is_held(&(wq->mutex)))
586 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, in unbound_pwq_by_node() argument
589 assert_rcu_or_wq_mutex_or_pool_mutex(wq); in unbound_pwq_by_node()
598 return wq->dfl_pwq; in unbound_pwq_by_node()
600 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); in unbound_pwq_by_node()
1139 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1232 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1233 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1383 static bool is_chained_work(struct workqueue_struct *wq) in is_chained_work() argument
1392 return worker && worker->current_pwq->wq == wq; in is_chained_work()
1428 static void __queue_work(int cpu, struct workqueue_struct *wq, in __queue_work() argument
1447 if (unlikely(wq->flags & __WQ_DRAINING) && in __queue_work()
1448 WARN_ON_ONCE(!is_chained_work(wq))) in __queue_work()
1453 if (wq->flags & WQ_UNBOUND) { in __queue_work()
1456 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1460 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1476 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
1496 if (wq->flags & WQ_UNBOUND) { in __queue_work()
1503 wq->name, cpu); in __queue_work()
1546 bool queue_work_on(int cpu, struct workqueue_struct *wq, in queue_work_on() argument
1555 __queue_work(cpu, wq, work); in queue_work_on()
1617 bool queue_work_node(int node, struct workqueue_struct *wq, in queue_work_node() argument
1632 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); in queue_work_node()
1639 __queue_work(cpu, wq, work); in queue_work_node()
1653 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1657 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, in __queue_delayed_work() argument
1663 WARN_ON_ONCE(!wq); in __queue_delayed_work()
1675 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1679 dwork->wq = wq; in __queue_delayed_work()
1700 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, in queue_delayed_work_on() argument
1711 __queue_delayed_work(cpu, wq, dwork, delay); in queue_delayed_work_on()
1738 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, in mod_delayed_work_on() argument
1749 __queue_delayed_work(cpu, wq, dwork, delay); in mod_delayed_work_on()
1764 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
1778 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) in queue_rcu_work() argument
1783 rwork->wq = wq; in queue_rcu_work()
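
The matches above cover the public queueing entry points (queue_work_on(), queue_work_node(), queue_delayed_work_on(), mod_delayed_work_on() and queue_rcu_work()), all of which funnel into __queue_work(). A minimal caller-side sketch follows; the module, workqueue name and handlers are hypothetical and not taken from workqueue.c:

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical queue */

static void example_fn(struct work_struct *work)
{
	pr_info("plain work ran on CPU %d\n", raw_smp_processor_id());
}

static void example_delayed_fn(struct work_struct *work)
{
	pr_info("delayed work ran\n");
}

static void example_rcu_fn(struct work_struct *work)
{
	pr_info("rcu work ran after a grace period\n");
}

static DECLARE_WORK(example_work, example_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_delayed_fn);
static struct rcu_work example_rwork;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	/* queue_work_on(): target a specific CPU's pool_workqueue */
	queue_work_on(raw_smp_processor_id(), example_wq, &example_work);

	/* queue_delayed_work_on(): timer fires delayed_work_timer_fn() -> __queue_work() */
	queue_delayed_work_on(WORK_CPU_UNBOUND, example_wq, &example_dwork,
			      msecs_to_jiffies(100));

	/* mod_delayed_work_on(): re-arm the timer if the work is already pending */
	mod_delayed_work_on(WORK_CPU_UNBOUND, example_wq, &example_dwork,
			    msecs_to_jiffies(500));

	/* queue_rcu_work(): queueing happens after an RCU grace period (rcu_work_rcufn()) */
	INIT_RCU_WORK(&example_rwork, example_rcu_fn);
	queue_rcu_work(example_wq, &example_rwork);

	return 0;
}

static void __exit example_exit(void)
{
	flush_rcu_work(&example_rwork);
	cancel_delayed_work_sync(&example_dwork);
	destroy_workqueue(example_wq);	/* drains any remaining items */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
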
2043 struct workqueue_struct *wq = pwq->wq; in send_mayday() local
2047 if (!wq->rescuer) in send_mayday()
2058 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2059 wake_up_process(wq->rescuer->task); in send_mayday()
2197 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2241 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2274 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2306 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2495 struct workqueue_struct *wq = rescuer->rescue_wq; in rescuer_thread() local
2522 while (!list_empty(&wq->maydays)) { in rescuer_thread()
2523 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread()
2570 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2572 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2638 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
2641 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
2760 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, in flush_workqueue_prep_pwqs() argument
2767 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); in flush_workqueue_prep_pwqs()
2768 atomic_set(&wq->nr_pwqs_to_flush, 1); in flush_workqueue_prep_pwqs()
2771 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2781 atomic_inc(&wq->nr_pwqs_to_flush); in flush_workqueue_prep_pwqs()
2794 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) in flush_workqueue_prep_pwqs()
2795 complete(&wq->first_flusher->done); in flush_workqueue_prep_pwqs()
2807 void __flush_workqueue(struct workqueue_struct *wq) in __flush_workqueue() argument
2812 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), in __flush_workqueue()
2819 lock_map_acquire(&wq->lockdep_map); in __flush_workqueue()
2820 lock_map_release(&wq->lockdep_map); in __flush_workqueue()
2822 mutex_lock(&wq->mutex); in __flush_workqueue()
2827 next_color = work_next_color(wq->work_color); in __flush_workqueue()
2829 if (next_color != wq->flush_color) { in __flush_workqueue()
2835 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); in __flush_workqueue()
2836 this_flusher.flush_color = wq->work_color; in __flush_workqueue()
2837 wq->work_color = next_color; in __flush_workqueue()
2839 if (!wq->first_flusher) { in __flush_workqueue()
2841 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in __flush_workqueue()
2843 wq->first_flusher = &this_flusher; in __flush_workqueue()
2845 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, in __flush_workqueue()
2846 wq->work_color)) { in __flush_workqueue()
2848 wq->flush_color = next_color; in __flush_workqueue()
2849 wq->first_flusher = NULL; in __flush_workqueue()
2854 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); in __flush_workqueue()
2855 list_add_tail(&this_flusher.list, &wq->flusher_queue); in __flush_workqueue()
2856 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in __flush_workqueue()
2864 list_add_tail(&this_flusher.list, &wq->flusher_overflow); in __flush_workqueue()
2867 check_flush_dependency(wq, NULL); in __flush_workqueue()
2869 mutex_unlock(&wq->mutex); in __flush_workqueue()
2879 if (READ_ONCE(wq->first_flusher) != &this_flusher) in __flush_workqueue()
2882 mutex_lock(&wq->mutex); in __flush_workqueue()
2885 if (wq->first_flusher != &this_flusher) in __flush_workqueue()
2888 WRITE_ONCE(wq->first_flusher, NULL); in __flush_workqueue()
2891 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); in __flush_workqueue()
2897 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { in __flush_workqueue()
2898 if (next->flush_color != wq->flush_color) in __flush_workqueue()
2904 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && in __flush_workqueue()
2905 wq->flush_color != work_next_color(wq->work_color)); in __flush_workqueue()
2908 wq->flush_color = work_next_color(wq->flush_color); in __flush_workqueue()
2911 if (!list_empty(&wq->flusher_overflow)) { in __flush_workqueue()
2918 list_for_each_entry(tmp, &wq->flusher_overflow, list) in __flush_workqueue()
2919 tmp->flush_color = wq->work_color; in __flush_workqueue()
2921 wq->work_color = work_next_color(wq->work_color); in __flush_workqueue()
2923 list_splice_tail_init(&wq->flusher_overflow, in __flush_workqueue()
2924 &wq->flusher_queue); in __flush_workqueue()
2925 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); in __flush_workqueue()
2928 if (list_empty(&wq->flusher_queue)) { in __flush_workqueue()
2929 WARN_ON_ONCE(wq->flush_color != wq->work_color); in __flush_workqueue()
2937 WARN_ON_ONCE(wq->flush_color == wq->work_color); in __flush_workqueue()
2938 WARN_ON_ONCE(wq->flush_color != next->flush_color); in __flush_workqueue()
2941 wq->first_flusher = next; in __flush_workqueue()
2943 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) in __flush_workqueue()
2950 wq->first_flusher = NULL; in __flush_workqueue()
2954 mutex_unlock(&wq->mutex); in __flush_workqueue()
2969 void drain_workqueue(struct workqueue_struct *wq) in drain_workqueue() argument
2979 mutex_lock(&wq->mutex); in drain_workqueue()
2980 if (!wq->nr_drainers++) in drain_workqueue()
2981 wq->flags |= __WQ_DRAINING; in drain_workqueue()
2982 mutex_unlock(&wq->mutex); in drain_workqueue()
2984 __flush_workqueue(wq); in drain_workqueue()
2986 mutex_lock(&wq->mutex); in drain_workqueue()
2988 for_each_pwq(pwq, wq) { in drain_workqueue()
3001 wq->name, __func__, flush_cnt); in drain_workqueue()
3003 mutex_unlock(&wq->mutex); in drain_workqueue()
3007 if (!--wq->nr_drainers) in drain_workqueue()
3008 wq->flags &= ~__WQ_DRAINING; in drain_workqueue()
3009 mutex_unlock(&wq->mutex); in drain_workqueue()
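
__flush_workqueue() above implements the flush-color protocol behind flush_workqueue(), and drain_workqueue() runs it in a loop while __WQ_DRAINING rejects new outside work. A small hedged sketch of the caller-visible side (the helper and its arguments are illustrative):

#include <linux/workqueue.h>

/* Wait for outstanding work at three different granularities. */
static void example_quiesce(struct workqueue_struct *wq,
			    struct work_struct *one_item)
{
	/* Wait for a single item to finish executing. */
	flush_work(one_item);

	/* Wait for everything queued on @wq up to this call. */
	flush_workqueue(wq);

	/*
	 * Keep flushing until @wq is empty; while draining, only chained
	 * work (queued from a worker of @wq itself) is accepted.
	 */
	drain_workqueue(wq);
}
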
3042 check_flush_dependency(pwq->wq, work); in start_flush_work()
3057 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3058 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3059 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
3226 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
3492 static void wq_init_lockdep(struct workqueue_struct *wq) in wq_init_lockdep() argument
3496 lockdep_register_key(&wq->key); in wq_init_lockdep()
3497 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); in wq_init_lockdep()
3499 lock_name = wq->name; in wq_init_lockdep()
3501 wq->lock_name = lock_name; in wq_init_lockdep()
3502 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); in wq_init_lockdep()
3505 static void wq_unregister_lockdep(struct workqueue_struct *wq) in wq_unregister_lockdep() argument
3507 lockdep_unregister_key(&wq->key); in wq_unregister_lockdep()
3510 static void wq_free_lockdep(struct workqueue_struct *wq) in wq_free_lockdep() argument
3512 if (wq->lock_name != wq->name) in wq_free_lockdep()
3513 kfree(wq->lock_name); in wq_free_lockdep()
3516 static void wq_init_lockdep(struct workqueue_struct *wq) in wq_init_lockdep() argument
3520 static void wq_unregister_lockdep(struct workqueue_struct *wq) in wq_unregister_lockdep() argument
3524 static void wq_free_lockdep(struct workqueue_struct *wq) in wq_free_lockdep() argument
3531 struct workqueue_struct *wq = in rcu_free_wq() local
3534 wq_free_lockdep(wq); in rcu_free_wq()
3536 if (!(wq->flags & WQ_UNBOUND)) in rcu_free_wq()
3537 free_percpu(wq->cpu_pwqs); in rcu_free_wq()
3539 free_workqueue_attrs(wq->unbound_attrs); in rcu_free_wq()
3541 kfree(wq); in rcu_free_wq()
3716 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn() local
3725 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) in pwq_unbound_release_workfn()
3728 mutex_lock(&wq->mutex); in pwq_unbound_release_workfn()
3730 is_last = list_empty(&wq->pwqs); in pwq_unbound_release_workfn()
3731 mutex_unlock(&wq->mutex); in pwq_unbound_release_workfn()
3745 wq_unregister_lockdep(wq); in pwq_unbound_release_workfn()
3746 call_rcu(&wq->rcu, rcu_free_wq); in pwq_unbound_release_workfn()
3760 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active() local
3761 bool freezable = wq->flags & WQ_FREEZABLE; in pwq_adjust_max_active()
3765 lockdep_assert_held(&wq->mutex); in pwq_adjust_max_active()
3768 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3782 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3806 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3814 pwq->wq = wq; in init_pwq()
3826 struct workqueue_struct *wq = pwq->wq; in link_pwq() local
3828 lockdep_assert_held(&wq->mutex); in link_pwq()
3835 pwq->work_color = wq->work_color; in link_pwq()
3841 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
3845 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, in alloc_unbound_pwq() argument
3863 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3920 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, in numa_pwq_tbl_install() argument
3927 lockdep_assert_held(&wq->mutex); in numa_pwq_tbl_install()
3932 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in numa_pwq_tbl_install()
3933 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
3939 struct workqueue_struct *wq; /* target workqueue */ member
3964 apply_wqattrs_prepare(struct workqueue_struct *wq, in apply_wqattrs_prepare() argument
4004 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); in apply_wqattrs_prepare()
4010 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); in apply_wqattrs_prepare()
4024 ctx->wq = wq; in apply_wqattrs_prepare()
4041 mutex_lock(&ctx->wq->mutex); in apply_wqattrs_commit()
4043 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); in apply_wqattrs_commit()
4047 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, in apply_wqattrs_commit()
4052 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); in apply_wqattrs_commit()
4054 mutex_unlock(&ctx->wq->mutex); in apply_wqattrs_commit()
4070 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, in apply_workqueue_attrs_locked() argument
4076 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) in apply_workqueue_attrs_locked()
4080 if (!list_empty(&wq->pwqs)) { in apply_workqueue_attrs_locked()
4081 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) in apply_workqueue_attrs_locked()
4084 wq->flags &= ~__WQ_ORDERED; in apply_workqueue_attrs_locked()
4087 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); in apply_workqueue_attrs_locked()
4116 int apply_workqueue_attrs(struct workqueue_struct *wq, in apply_workqueue_attrs() argument
4124 ret = apply_workqueue_attrs_locked(wq, attrs); in apply_workqueue_attrs()
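
apply_workqueue_attrs() (and the _locked variant above) rebuilds an unbound workqueue's pool_workqueues from a new attribute set via apply_wqattrs_prepare()/apply_wqattrs_commit(). A hypothetical sketch, assuming built-in (non-module) code and the usual nice/cpumask fields of struct workqueue_attrs; the helper and chosen values are illustrative:

#include <linux/cpumask.h>
#include <linux/workqueue.h>

/* Pin an unbound workqueue's workers to CPU 0 at a higher priority. */
static int example_retune_unbound(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;				/* worker nice level */
	cpumask_copy(attrs->cpumask, cpumask_of(0));	/* allowed CPUs */

	/* Swaps in freshly built pool_workqueues, then drops the old ones. */
	ret = apply_workqueue_attrs(wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}
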
4152 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, in wq_update_unbound_numa() argument
4163 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || in wq_update_unbound_numa()
4164 wq->unbound_attrs->no_numa) in wq_update_unbound_numa()
4175 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); in wq_update_unbound_numa()
4176 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4184 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { in wq_update_unbound_numa()
4192 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4195 wq->name); in wq_update_unbound_numa()
4200 mutex_lock(&wq->mutex); in wq_update_unbound_numa()
4201 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
4205 mutex_lock(&wq->mutex); in wq_update_unbound_numa()
4206 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4207 get_pwq(wq->dfl_pwq); in wq_update_unbound_numa()
4208 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); in wq_update_unbound_numa()
4209 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); in wq_update_unbound_numa()
4211 mutex_unlock(&wq->mutex); in wq_update_unbound_numa()
4215 static int alloc_and_link_pwqs(struct workqueue_struct *wq) in alloc_and_link_pwqs() argument
4217 bool highpri = wq->flags & WQ_HIGHPRI; in alloc_and_link_pwqs()
4220 if (!(wq->flags & WQ_UNBOUND)) { in alloc_and_link_pwqs()
4221 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); in alloc_and_link_pwqs()
4222 if (!wq->cpu_pwqs) in alloc_and_link_pwqs()
4227 per_cpu_ptr(wq->cpu_pwqs, cpu); in alloc_and_link_pwqs()
4231 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4233 mutex_lock(&wq->mutex); in alloc_and_link_pwqs()
4235 mutex_unlock(&wq->mutex); in alloc_and_link_pwqs()
4241 if (wq->flags & __WQ_ORDERED) { in alloc_and_link_pwqs()
4242 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); in alloc_and_link_pwqs()
4244 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || in alloc_and_link_pwqs()
4245 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), in alloc_and_link_pwqs()
4246 "ordering guarantee broken for workqueue %s\n", wq->name); in alloc_and_link_pwqs()
4248 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); in alloc_and_link_pwqs()
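
The __WQ_ORDERED branch above is what alloc_ordered_workqueue() depends on: a single dfl_pwq so items execute strictly in queueing order. A hypothetical caller sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *ordered_wq;	/* hypothetical */

static int example_create_ordered(void)
{
	/* Expands to alloc_workqueue() with WQ_UNBOUND | __WQ_ORDERED set and max_active 1. */
	ordered_wq = alloc_ordered_workqueue("example_ordered", WQ_MEM_RECLAIM);
	return ordered_wq ? 0 : -ENOMEM;
}
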
4271 static int init_rescuer(struct workqueue_struct *wq) in init_rescuer() argument
4276 if (!(wq->flags & WQ_MEM_RECLAIM)) in init_rescuer()
4283 rescuer->rescue_wq = wq; in init_rescuer()
4284 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name); in init_rescuer()
4291 wq->rescuer = rescuer; in init_rescuer()
4305 struct workqueue_struct *wq; in alloc_workqueue() local
4324 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); in alloc_workqueue()
4326 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); in alloc_workqueue()
4327 if (!wq) in alloc_workqueue()
4331 wq->unbound_attrs = alloc_workqueue_attrs(); in alloc_workqueue()
4332 if (!wq->unbound_attrs) in alloc_workqueue()
4337 vsnprintf(wq->name, sizeof(wq->name), fmt, args); in alloc_workqueue()
4341 max_active = wq_clamp_max_active(max_active, flags, wq->name); in alloc_workqueue()
4344 wq->flags = flags; in alloc_workqueue()
4345 wq->saved_max_active = max_active; in alloc_workqueue()
4346 mutex_init(&wq->mutex); in alloc_workqueue()
4347 atomic_set(&wq->nr_pwqs_to_flush, 0); in alloc_workqueue()
4348 INIT_LIST_HEAD(&wq->pwqs); in alloc_workqueue()
4349 INIT_LIST_HEAD(&wq->flusher_queue); in alloc_workqueue()
4350 INIT_LIST_HEAD(&wq->flusher_overflow); in alloc_workqueue()
4351 INIT_LIST_HEAD(&wq->maydays); in alloc_workqueue()
4353 wq_init_lockdep(wq); in alloc_workqueue()
4354 INIT_LIST_HEAD(&wq->list); in alloc_workqueue()
4356 if (alloc_and_link_pwqs(wq) < 0) in alloc_workqueue()
4359 if (wq_online && init_rescuer(wq) < 0) in alloc_workqueue()
4362 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) in alloc_workqueue()
4372 mutex_lock(&wq->mutex); in alloc_workqueue()
4373 for_each_pwq(pwq, wq) in alloc_workqueue()
4375 mutex_unlock(&wq->mutex); in alloc_workqueue()
4377 list_add_tail_rcu(&wq->list, &workqueues); in alloc_workqueue()
4381 return wq; in alloc_workqueue()
4384 wq_unregister_lockdep(wq); in alloc_workqueue()
4385 wq_free_lockdep(wq); in alloc_workqueue()
4387 free_workqueue_attrs(wq->unbound_attrs); in alloc_workqueue()
4388 kfree(wq); in alloc_workqueue()
4391 destroy_workqueue(wq); in alloc_workqueue()
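
alloc_workqueue() above strings together the setup steps seen in these matches (wq_init_lockdep(), alloc_and_link_pwqs(), init_rescuer(), optional sysfs registration). A hypothetical sketch of two typical callers; names and flag choices are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *io_wq;	/* hypothetical */
static struct workqueue_struct *bg_wq;	/* hypothetical */

static int example_create_queues(void)
{
	/*
	 * WQ_MEM_RECLAIM makes init_rescuer() attach a rescuer thread so
	 * the queue keeps making progress under memory pressure.
	 */
	io_wq = alloc_workqueue("example_io", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!io_wq)
		return -ENOMEM;

	/* Unbound, freezable, at most 4 items in flight. */
	bg_wq = alloc_workqueue("example_bg", WQ_UNBOUND | WQ_FREEZABLE, 4);
	if (!bg_wq) {
		destroy_workqueue(io_wq);
		return -ENOMEM;
	}
	return 0;
}
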
4404 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4418 void destroy_workqueue(struct workqueue_struct *wq) in destroy_workqueue() argument
4427 workqueue_sysfs_unregister(wq); in destroy_workqueue()
4430 drain_workqueue(wq); in destroy_workqueue()
4433 if (wq->rescuer) { in destroy_workqueue()
4434 struct worker *rescuer = wq->rescuer; in destroy_workqueue()
4438 wq->rescuer = NULL; in destroy_workqueue()
4451 mutex_lock(&wq->mutex); in destroy_workqueue()
4452 for_each_pwq(pwq, wq) { in destroy_workqueue()
4456 __func__, wq->name); in destroy_workqueue()
4459 mutex_unlock(&wq->mutex); in destroy_workqueue()
4461 show_one_workqueue(wq); in destroy_workqueue()
4466 mutex_unlock(&wq->mutex); in destroy_workqueue()
4472 list_del_rcu(&wq->list); in destroy_workqueue()
4475 if (!(wq->flags & WQ_UNBOUND)) { in destroy_workqueue()
4476 wq_unregister_lockdep(wq); in destroy_workqueue()
4481 call_rcu(&wq->rcu, rcu_free_wq); in destroy_workqueue()
4489 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4490 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); in destroy_workqueue()
4498 pwq = wq->dfl_pwq; in destroy_workqueue()
4499 wq->dfl_pwq = NULL; in destroy_workqueue()
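
destroy_workqueue() above drains the queue, sanity-checks every pwq with pwq_busy(), stops the rescuer and defers the final free to RCU. A hedged sketch of the teardown order a caller is expected to follow (the heartbeat work is hypothetical):

#include <linux/workqueue.h>

/* Tear down in the order destroy_workqueue() expects. */
static void example_shutdown(struct workqueue_struct *wq,
			     struct delayed_work *heartbeat)
{
	/* Stop self-rearming work first so drain_workqueue() can finish. */
	cancel_delayed_work_sync(heartbeat);

	/* Drains, warns about busy pwqs, stops the rescuer, frees via RCU. */
	destroy_workqueue(wq);
}
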
4515 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) in workqueue_set_max_active() argument
4520 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) in workqueue_set_max_active()
4523 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); in workqueue_set_max_active()
4525 mutex_lock(&wq->mutex); in workqueue_set_max_active()
4527 wq->flags &= ~__WQ_ORDERED; in workqueue_set_max_active()
4528 wq->saved_max_active = max_active; in workqueue_set_max_active()
4530 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4533 mutex_unlock(&wq->mutex); in workqueue_set_max_active()
4586 bool workqueue_congested(int cpu, struct workqueue_struct *wq) in workqueue_congested() argument
4597 if (!(wq->flags & WQ_UNBOUND)) in workqueue_congested()
4598 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4600 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
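
workqueue_set_max_active() and workqueue_congested() above are the runtime tuning and back-pressure hooks. A small hypothetical sketch of both:

#include <linux/workqueue.h>

static void example_tune(struct workqueue_struct *wq)
{
	/* Raise the per-pwq concurrency limit (clamped by wq_clamp_max_active()). */
	workqueue_set_max_active(wq, 32);
}

static bool example_should_throttle(struct workqueue_struct *wq)
{
	/* True if the pwq servicing this CPU already has work waiting to become active. */
	return workqueue_congested(WORK_CPU_UNBOUND, wq);
}
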
4686 struct workqueue_struct *wq = NULL; in print_worker_info() local
4704 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4705 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); in print_worker_info()
4814 void show_one_workqueue(struct workqueue_struct *wq) in show_one_workqueue() argument
4820 for_each_pwq(pwq, wq) { in show_one_workqueue()
4829 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); in show_one_workqueue()
4831 for_each_pwq(pwq, wq) { in show_one_workqueue()
4911 struct workqueue_struct *wq; in show_all_workqueues() local
4919 list_for_each_entry_rcu(wq, &workqueues, list) in show_all_workqueues()
4920 show_one_workqueue(wq); in show_all_workqueues()
5138 struct workqueue_struct *wq; in workqueue_online_cpu() local
5155 list_for_each_entry(wq, &workqueues, list) in workqueue_online_cpu()
5156 wq_update_unbound_numa(wq, cpu, true); in workqueue_online_cpu()
5164 struct workqueue_struct *wq; in workqueue_offline_cpu() local
5174 list_for_each_entry(wq, &workqueues, list) in workqueue_offline_cpu()
5175 wq_update_unbound_numa(wq, cpu, false); in workqueue_offline_cpu()
5256 struct workqueue_struct *wq; in freeze_workqueues_begin() local
5264 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_begin()
5265 mutex_lock(&wq->mutex); in freeze_workqueues_begin()
5266 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5268 mutex_unlock(&wq->mutex); in freeze_workqueues_begin()
5290 struct workqueue_struct *wq; in freeze_workqueues_busy() local
5297 list_for_each_entry(wq, &workqueues, list) { in freeze_workqueues_busy()
5298 if (!(wq->flags & WQ_FREEZABLE)) in freeze_workqueues_busy()
5305 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5331 struct workqueue_struct *wq; in thaw_workqueues() local
5342 list_for_each_entry(wq, &workqueues, list) { in thaw_workqueues()
5343 mutex_lock(&wq->mutex); in thaw_workqueues()
5344 for_each_pwq(pwq, wq) in thaw_workqueues()
5346 mutex_unlock(&wq->mutex); in thaw_workqueues()
5358 struct workqueue_struct *wq; in workqueue_apply_unbound_cpumask() local
5363 list_for_each_entry(wq, &workqueues, list) { in workqueue_apply_unbound_cpumask()
5364 if (!(wq->flags & WQ_UNBOUND)) in workqueue_apply_unbound_cpumask()
5368 if (!list_empty(&wq->pwqs)) { in workqueue_apply_unbound_cpumask()
5369 if (wq->flags & __WQ_ORDERED_EXPLICIT) in workqueue_apply_unbound_cpumask()
5371 wq->flags &= ~__WQ_ORDERED; in workqueue_apply_unbound_cpumask()
5374 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); in workqueue_apply_unbound_cpumask()
5451 struct workqueue_struct *wq; member
5459 return wq_dev->wq; in dev_to_wq()
5465 struct workqueue_struct *wq = dev_to_wq(dev); in per_cpu_show() local
5467 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); in per_cpu_show()
5474 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_show() local
5476 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); in max_active_show()
5483 struct workqueue_struct *wq = dev_to_wq(dev); in max_active_store() local
5489 workqueue_set_max_active(wq, val); in max_active_store()
5504 struct workqueue_struct *wq = dev_to_wq(dev); in wq_pool_ids_show() local
5513 unbound_pwq_by_node(wq, node)->pool->id); in wq_pool_ids_show()
5526 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_show() local
5529 mutex_lock(&wq->mutex); in wq_nice_show()
5530 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); in wq_nice_show()
5531 mutex_unlock(&wq->mutex); in wq_nice_show()
5537 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) in wq_sysfs_prep_attrs() argument
5547 copy_workqueue_attrs(attrs, wq->unbound_attrs); in wq_sysfs_prep_attrs()
5554 struct workqueue_struct *wq = dev_to_wq(dev); in wq_nice_store() local
5560 attrs = wq_sysfs_prep_attrs(wq); in wq_nice_store()
5566 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_nice_store()
5579 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_show() local
5582 mutex_lock(&wq->mutex); in wq_cpumask_show()
5584 cpumask_pr_args(wq->unbound_attrs->cpumask)); in wq_cpumask_show()
5585 mutex_unlock(&wq->mutex); in wq_cpumask_show()
5593 struct workqueue_struct *wq = dev_to_wq(dev); in wq_cpumask_store() local
5599 attrs = wq_sysfs_prep_attrs(wq); in wq_cpumask_store()
5605 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_cpumask_store()
5616 struct workqueue_struct *wq = dev_to_wq(dev); in wq_numa_show() local
5619 mutex_lock(&wq->mutex); in wq_numa_show()
5621 !wq->unbound_attrs->no_numa); in wq_numa_show()
5622 mutex_unlock(&wq->mutex); in wq_numa_show()
5630 struct workqueue_struct *wq = dev_to_wq(dev); in wq_numa_store() local
5636 attrs = wq_sysfs_prep_attrs(wq); in wq_numa_store()
5643 ret = apply_workqueue_attrs_locked(wq, attrs); in wq_numa_store()
5733 int workqueue_sysfs_register(struct workqueue_struct *wq) in workqueue_sysfs_register() argument
5743 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) in workqueue_sysfs_register()
5746 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); in workqueue_sysfs_register()
5750 wq_dev->wq = wq; in workqueue_sysfs_register()
5753 dev_set_name(&wq_dev->dev, "%s", wq->name); in workqueue_sysfs_register()
5764 wq->wq_dev = NULL; in workqueue_sysfs_register()
5768 if (wq->flags & WQ_UNBOUND) { in workqueue_sysfs_register()
5775 wq->wq_dev = NULL; in workqueue_sysfs_register()
5792 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) in workqueue_sysfs_unregister() argument
5794 struct wq_device *wq_dev = wq->wq_dev; in workqueue_sysfs_unregister()
5796 if (!wq->wq_dev) in workqueue_sysfs_unregister()
5799 wq->wq_dev = NULL; in workqueue_sysfs_unregister()
5803 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } in workqueue_sysfs_unregister() argument
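
The sysfs block above (per_cpu and max_active, plus pool_ids, nice, cpumask and numa for unbound queues) is registered by workqueue_sysfs_register() when a workqueue carries WQ_SYSFS. A hypothetical sketch of opting in; the queue name and the exact sysfs path are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *tunable_wq;	/* hypothetical */

static int example_create_tunable(void)
{
	/*
	 * WQ_SYSFS exposes the queue (typically under
	 * /sys/devices/virtual/workqueue/<name>/) so nice, cpumask and
	 * max_active can be adjusted from userspace at runtime.
	 */
	tunable_wq = alloc_workqueue("example_tunable", WQ_UNBOUND | WQ_SYSFS, 0);
	return tunable_wq ? 0 : -ENOMEM;
}
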
6089 struct workqueue_struct *wq; in workqueue_init() local
6112 list_for_each_entry(wq, &workqueues, list) { in workqueue_init()
6113 wq_update_unbound_numa(wq, smp_processor_id(), true); in workqueue_init()
6114 WARN(init_rescuer(wq), in workqueue_init()
6116 wq->name); in workqueue_init()