1 /*
2 * kernel/workqueue.c - generic async execution with shared worker pool
3 *
4 * Copyright (C) 2002 Ingo Molnar
5 *
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
11 *
12 * Made to use alloc_percpu by Christoph Lameter.
13 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There are two worker pools for each CPU (one for
20 * normal work items and the other for high priority ones) and some extra
21 * pools for workqueues which are not bound to any specific CPU - the
22 * number of these backing pools is dynamic.
23 *
24 * Please read Documentation/core-api/workqueue.rst for details.
25 */
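
/*
 * For orientation only (not used by this file), a minimal user of this
 * mechanism looks roughly like the following; the names are made up and
 * my_work_fn() would later run in process context on a shared kworker:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("hello from process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	- queue on system_wq
 *	queue_work(some_wq, &my_work);	- queue on a specific workqueue
 */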
26
27 #include <linux/export.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
30 #include <linux/init.h>
31 #include <linux/signal.h>
32 #include <linux/completion.h>
33 #include <linux/workqueue.h>
34 #include <linux/slab.h>
35 #include <linux/cpu.h>
36 #include <linux/notifier.h>
37 #include <linux/kthread.h>
38 #include <linux/hardirq.h>
39 #include <linux/mempolicy.h>
40 #include <linux/freezer.h>
41 #include <linux/kallsyms.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/nmi.h>
52
53 #include "workqueue_internal.h"
54
55 enum {
56 /*
57 * worker_pool flags
58 *
59 * A bound pool is either associated or disassociated with its CPU.
60 * While associated (!DISASSOCIATED), all workers are bound to the
61 * CPU and none has %WORKER_UNBOUND set and concurrency management
62 * is in effect.
63 *
64 * While DISASSOCIATED, the cpu may be offline and all workers have
65 * %WORKER_UNBOUND set and concurrency management disabled, and may
66 * be executing on any CPU. The pool behaves as an unbound one.
67 *
68 * Note that DISASSOCIATED should be flipped only while holding
69 * attach_mutex to avoid changing binding state while
70 * worker_attach_to_pool() is in progress.
71 */
72 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
73 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
74
75 /* worker flags */
76 WORKER_DIE = 1 << 1, /* die die die */
77 WORKER_IDLE = 1 << 2, /* is idle */
78 WORKER_PREP = 1 << 3, /* preparing to run works */
79 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
80 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
81 WORKER_REBOUND = 1 << 8, /* worker was rebound */
82
83 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
84 WORKER_UNBOUND | WORKER_REBOUND,
85
86 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
87
88 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
89 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
90
91 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
92 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
93
94 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
95 /* call for help after 10ms
96 (min two ticks) */
97 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
98 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
99
100 /*
101 * Rescue workers are used only in emergencies and shared by
102 * all cpus. Give MIN_NICE.
103 */
104 RESCUER_NICE_LEVEL = MIN_NICE,
105 HIGHPRI_NICE_LEVEL = MIN_NICE,
106
107 WQ_NAME_LEN = 24,
108 };
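
/*
 * Worked example for MAYDAY_INITIAL_TIMEOUT above: with HZ == 1000,
 * HZ / 100 == 10 >= 2, so the first mayday call goes out after 10 ticks
 * (10ms); with HZ == 100, HZ / 100 == 1 falls below the two tick floor
 * and the first call goes out after 2 ticks (20ms) instead.
 */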
109
110 /*
111 * Structure fields follow one of the following exclusion rules.
112 *
113 * I: Modifiable by initialization/destruction paths and read-only for
114 * everyone else.
115 *
116 * P: Preemption protected. Disabling preemption is enough and should
117 * only be modified and accessed from the local cpu.
118 *
119 * L: pool->lock protected. Access with pool->lock held.
120 *
121 * X: During normal operation, modification requires pool->lock and should
122 * be done only from local cpu. Either disabling preemption on local
123 * cpu or grabbing pool->lock is enough for read access. If
124 * POOL_DISASSOCIATED is set, it's identical to L.
125 *
126 * A: pool->attach_mutex protected.
127 *
128 * PL: wq_pool_mutex protected.
129 *
130 * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
131 *
132 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
133 *
134 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
135 * sched-RCU for reads.
136 *
137 * WQ: wq->mutex protected.
138 *
139 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
140 *
141 * MD: wq_mayday_lock protected.
142 */
143
144 /* struct worker is defined in workqueue_internal.h */
145
146 struct worker_pool {
147 spinlock_t lock; /* the pool lock */
148 int cpu; /* I: the associated cpu */
149 int node; /* I: the associated node ID */
150 int id; /* I: pool ID */
151 unsigned int flags; /* X: flags */
152
153 unsigned long watchdog_ts; /* L: watchdog timestamp */
154
155 struct list_head worklist; /* L: list of pending works */
156 int nr_workers; /* L: total number of workers */
157
158 /* nr_idle includes the ones off idle_list for rebinding */
159 int nr_idle; /* L: currently idle ones */
160
161 struct list_head idle_list; /* X: list of idle workers */
162 struct timer_list idle_timer; /* L: worker idle timeout */
163 struct timer_list mayday_timer; /* L: SOS timer for workers */
164
165 /* a worker is either on busy_hash or idle_list, or the manager */
166 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
167 /* L: hash of busy workers */
168
169 /* see manage_workers() for details on the two manager mutexes */
170 struct worker *manager; /* L: purely informational */
171 struct mutex attach_mutex; /* attach/detach exclusion */
172 struct list_head workers; /* A: attached workers */
173 struct completion *detach_completion; /* all workers detached */
174
175 struct ida worker_ida; /* worker IDs for task name */
176
177 struct workqueue_attrs *attrs; /* I: worker attributes */
178 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
179 int refcnt; /* PL: refcnt for unbound pools */
180
181 /*
182 * The current concurrency level. As it's likely to be accessed
183 * from other CPUs during try_to_wake_up(), put it in a separate
184 * cacheline.
185 */
186 atomic_t nr_running ____cacheline_aligned_in_smp;
187
188 /*
189 * Destruction of pool is sched-RCU protected to allow dereferences
190 * from get_work_pool().
191 */
192 struct rcu_head rcu;
193 } ____cacheline_aligned_in_smp;
194
195 /*
196 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
197 * of work_struct->data are used for flags and the remaining high bits
198 * point to the pwq; thus, pwqs need to be aligned on a boundary of
199 * 1 << WORK_STRUCT_FLAG_BITS bytes.
200 */
201 struct pool_workqueue {
202 struct worker_pool *pool; /* I: the associated pool */
203 struct workqueue_struct *wq; /* I: the owning workqueue */
204 int work_color; /* L: current color */
205 int flush_color; /* L: flushing color */
206 int refcnt; /* L: reference count */
207 int nr_in_flight[WORK_NR_COLORS];
208 /* L: nr of in_flight works */
209 int nr_active; /* L: nr of active works */
210 int max_active; /* L: max active works */
211 struct list_head delayed_works; /* L: delayed works */
212 struct list_head pwqs_node; /* WR: node on wq->pwqs */
213 struct list_head mayday_node; /* MD: node on wq->maydays */
214
215 /*
216 * Release of unbound pwq is punted to system_wq. See put_pwq()
217 * and pwq_unbound_release_workfn() for details. pool_workqueue
218 * itself is also sched-RCU protected so that the first pwq can be
219 * determined without grabbing wq->mutex.
220 */
221 struct work_struct unbound_release_work;
222 struct rcu_head rcu;
223 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
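
/*
 * Illustrative sketch (not used by this file) of the packing described
 * above: because every pwq is aligned to 1 << WORK_STRUCT_FLAG_BITS
 * bytes, the low flag bits of the pointer are known to be zero and can
 * carry WORK_STRUCT_* flags in the same word:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
 *	pwq  = (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * set_work_pwq() and get_work_pwq() below implement this for real.
 */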
224
225 /*
226 * Structure used to wait for workqueue flush.
227 */
228 struct wq_flusher {
229 struct list_head list; /* WQ: list of flushers */
230 int flush_color; /* WQ: flush color waiting for */
231 struct completion done; /* flush completion */
232 };
233
234 struct wq_device;
235
236 /*
237 * The externally visible workqueue. It relays the issued work items to
238 * the appropriate worker_pool through its pool_workqueues.
239 */
240 struct workqueue_struct {
241 struct list_head pwqs; /* WR: all pwqs of this wq */
242 struct list_head list; /* PR: list of all workqueues */
243
244 struct mutex mutex; /* protects this wq */
245 int work_color; /* WQ: current work color */
246 int flush_color; /* WQ: current flush color */
247 atomic_t nr_pwqs_to_flush; /* flush in progress */
248 struct wq_flusher *first_flusher; /* WQ: first flusher */
249 struct list_head flusher_queue; /* WQ: flush waiters */
250 struct list_head flusher_overflow; /* WQ: flush overflow list */
251
252 struct list_head maydays; /* MD: pwqs requesting rescue */
253 struct worker *rescuer; /* I: rescue worker */
254
255 int nr_drainers; /* WQ: drain in progress */
256 int saved_max_active; /* WQ: saved pwq max_active */
257
258 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
259 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
260
261 #ifdef CONFIG_SYSFS
262 struct wq_device *wq_dev; /* I: for sysfs interface */
263 #endif
264 #ifdef CONFIG_LOCKDEP
265 struct lockdep_map lockdep_map;
266 #endif
267 char name[WQ_NAME_LEN]; /* I: workqueue name */
268
269 /*
270 * Destruction of workqueue_struct is sched-RCU protected to allow
271 * walking the workqueues list without grabbing wq_pool_mutex.
272 * This is used to dump all workqueues from sysrq.
273 */
274 struct rcu_head rcu;
275
276 /* hot fields used during command issue, aligned to cacheline */
277 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
278 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
279 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
280 };
281
282 static struct kmem_cache *pwq_cache;
283
284 static cpumask_var_t *wq_numa_possible_cpumask;
285 /* possible CPUs of each node */
286
287 static bool wq_disable_numa;
288 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
289
290 /* see the comment above the definition of WQ_POWER_EFFICIENT */
291 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
292 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
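
/*
 * Both knobs above are read-only (0444) module parameters, so they are
 * normally set on the kernel command line, e.g.:
 *
 *	workqueue.disable_numa=1 workqueue.power_efficient=1
 */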
293
294 static bool wq_online; /* can kworkers be created yet? */
295
296 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
297
298 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
299 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
300
301 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
302 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
303 static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
304
305 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
306 static bool workqueue_freezing; /* PL: have wqs started freezing? */
307
308 /* PL: allowable cpus for unbound wqs and work items */
309 static cpumask_var_t wq_unbound_cpumask;
310
311 /* CPU to which unbound work was last round-robin scheduled from this CPU */
312 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
313
314 /*
315 * Local execution of unbound work items is no longer guaranteed. The
316 * following always forces round-robin CPU selection on unbound work items
317 * to uncover usages which depend on it.
318 */
319 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
320 static bool wq_debug_force_rr_cpu = true;
321 #else
322 static bool wq_debug_force_rr_cpu = false;
323 #endif
324 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
325
326 /* the per-cpu worker pools */
327 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
328
329 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
330
331 /* PL: hash of all unbound pools keyed by pool->attrs */
332 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
333
334 /* I: attributes used when instantiating standard unbound pools on demand */
335 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
336
337 /* I: attributes used when instantiating ordered pools on demand */
338 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
339
340 struct workqueue_struct *system_wq __read_mostly;
341 EXPORT_SYMBOL(system_wq);
342 struct workqueue_struct *system_highpri_wq __read_mostly;
343 EXPORT_SYMBOL_GPL(system_highpri_wq);
344 struct workqueue_struct *system_long_wq __read_mostly;
345 EXPORT_SYMBOL_GPL(system_long_wq);
346 struct workqueue_struct *system_unbound_wq __read_mostly;
347 EXPORT_SYMBOL_GPL(system_unbound_wq);
348 struct workqueue_struct *system_freezable_wq __read_mostly;
349 EXPORT_SYMBOL_GPL(system_freezable_wq);
350 struct workqueue_struct *system_power_efficient_wq __read_mostly;
351 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
352 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
353 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
354
355 static int worker_thread(void *__worker);
356 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
357
358 #define CREATE_TRACE_POINTS
359 #include <trace/events/workqueue.h>
360
361 #define assert_rcu_or_pool_mutex() \
362 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
363 !lockdep_is_held(&wq_pool_mutex), \
364 "sched RCU or wq_pool_mutex should be held")
365
366 #define assert_rcu_or_wq_mutex(wq) \
367 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
368 !lockdep_is_held(&wq->mutex), \
369 "sched RCU or wq->mutex should be held")
370
371 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
372 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
373 !lockdep_is_held(&wq->mutex) && \
374 !lockdep_is_held(&wq_pool_mutex), \
375 "sched RCU, wq->mutex or wq_pool_mutex should be held")
376
377 #define for_each_cpu_worker_pool(pool, cpu) \
378 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
379 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
380 (pool)++)
381
382 /**
383 * for_each_pool - iterate through all worker_pools in the system
384 * @pool: iteration cursor
385 * @pi: integer used for iteration
386 *
387 * This must be called either with wq_pool_mutex held or sched RCU read
388 * locked. If the pool needs to be used beyond the locking in effect, the
389 * caller is responsible for guaranteeing that the pool stays online.
390 *
391 * The if/else clause exists only for the lockdep assertion and can be
392 * ignored.
393 */
394 #define for_each_pool(pool, pi) \
395 idr_for_each_entry(&worker_pool_idr, pool, pi) \
396 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
397 else
398
399 /**
400 * for_each_pool_worker - iterate through all workers of a worker_pool
401 * @worker: iteration cursor
402 * @pool: worker_pool to iterate workers of
403 *
404 * This must be called with @pool->attach_mutex held.
405 *
406 * The if/else clause exists only for the lockdep assertion and can be
407 * ignored.
408 */
409 #define for_each_pool_worker(worker, pool) \
410 list_for_each_entry((worker), &(pool)->workers, node) \
411 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
412 else
413
414 /**
415 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
416 * @pwq: iteration cursor
417 * @wq: the target workqueue
418 *
419 * This must be called either with wq->mutex held or sched RCU read locked.
420 * If the pwq needs to be used beyond the locking in effect, the caller is
421 * responsible for guaranteeing that the pwq stays online.
422 *
423 * The if/else clause exists only for the lockdep assertion and can be
424 * ignored.
425 */
426 #define for_each_pwq(pwq, wq) \
427 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
428 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
429 else
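
/*
 * Typical (illustrative) use of for_each_pwq(), holding wq->mutex so
 * that the pwq list cannot change underneath the walk; inspect_pwq() is
 * a made-up helper:
 *
 *	mutex_lock(&wq->mutex);
 *	for_each_pwq(pwq, wq)
 *		inspect_pwq(pwq);
 *	mutex_unlock(&wq->mutex);
 *
 * A sched-RCU read-side critical section works as well, per the comment
 * above.
 */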
430
431 #ifdef CONFIG_DEBUG_OBJECTS_WORK
432
433 static struct debug_obj_descr work_debug_descr;
434
435 static void *work_debug_hint(void *addr)
436 {
437 return ((struct work_struct *) addr)->func;
438 }
439
440 static bool work_is_static_object(void *addr)
441 {
442 struct work_struct *work = addr;
443
444 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
445 }
446
447 /*
448 * fixup_init is called when:
449 * - an active object is initialized
450 */
451 static bool work_fixup_init(void *addr, enum debug_obj_state state)
452 {
453 struct work_struct *work = addr;
454
455 switch (state) {
456 case ODEBUG_STATE_ACTIVE:
457 cancel_work_sync(work);
458 debug_object_init(work, &work_debug_descr);
459 return true;
460 default:
461 return false;
462 }
463 }
464
465 /*
466 * fixup_free is called when:
467 * - an active object is freed
468 */
469 static bool work_fixup_free(void *addr, enum debug_obj_state state)
470 {
471 struct work_struct *work = addr;
472
473 switch (state) {
474 case ODEBUG_STATE_ACTIVE:
475 cancel_work_sync(work);
476 debug_object_free(work, &work_debug_descr);
477 return true;
478 default:
479 return false;
480 }
481 }
482
483 static struct debug_obj_descr work_debug_descr = {
484 .name = "work_struct",
485 .debug_hint = work_debug_hint,
486 .is_static_object = work_is_static_object,
487 .fixup_init = work_fixup_init,
488 .fixup_free = work_fixup_free,
489 };
490
491 static inline void debug_work_activate(struct work_struct *work)
492 {
493 debug_object_activate(work, &work_debug_descr);
494 }
495
496 static inline void debug_work_deactivate(struct work_struct *work)
497 {
498 debug_object_deactivate(work, &work_debug_descr);
499 }
500
501 void __init_work(struct work_struct *work, int onstack)
502 {
503 if (onstack)
504 debug_object_init_on_stack(work, &work_debug_descr);
505 else
506 debug_object_init(work, &work_debug_descr);
507 }
508 EXPORT_SYMBOL_GPL(__init_work);
509
510 void destroy_work_on_stack(struct work_struct *work)
511 {
512 debug_object_free(work, &work_debug_descr);
513 }
514 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
515
516 void destroy_delayed_work_on_stack(struct delayed_work *work)
517 {
518 destroy_timer_on_stack(&work->timer);
519 debug_object_free(&work->work, &work_debug_descr);
520 }
521 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
522
523 #else
524 static inline void debug_work_activate(struct work_struct *work) { }
525 static inline void debug_work_deactivate(struct work_struct *work) { }
526 #endif
527
528 /**
529 * worker_pool_assign_id - allocate ID and assign it to @pool
530 * @pool: the pool pointer of interest
531 *
532 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
533 * successfully, -errno on failure.
534 */
535 static int worker_pool_assign_id(struct worker_pool *pool)
536 {
537 int ret;
538
539 lockdep_assert_held(&wq_pool_mutex);
540
541 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
542 GFP_KERNEL);
543 if (ret >= 0) {
544 pool->id = ret;
545 return 0;
546 }
547 return ret;
548 }
549
550 /**
551 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
552 * @wq: the target workqueue
553 * @node: the node ID
554 *
555 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
556 * read locked.
557 * If the pwq needs to be used beyond the locking in effect, the caller is
558 * responsible for guaranteeing that the pwq stays online.
559 *
560 * Return: The unbound pool_workqueue for @node.
561 */
562 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
563 int node)
564 {
565 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
566
567 /*
568 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
569 * delayed item is pending. The plan is to keep CPU -> NODE
570 * mapping valid and stable across CPU on/offlines. Once that
571 * happens, this workaround can be removed.
572 */
573 if (unlikely(node == NUMA_NO_NODE))
574 return wq->dfl_pwq;
575
576 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
577 }
578
579 static unsigned int work_color_to_flags(int color)
580 {
581 return color << WORK_STRUCT_COLOR_SHIFT;
582 }
583
584 static int get_work_color(struct work_struct *work)
585 {
586 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
587 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
588 }
589
590 static int work_next_color(int color)
591 {
592 return (color + 1) % WORK_NR_COLORS;
593 }
594
595 /*
596 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
597 * contain the pointer to the queued pwq. Once execution starts, the flag
598 * is cleared and the high bits contain OFFQ flags and pool ID.
599 *
600 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
601 * and clear_work_data() can be used to set the pwq, pool or clear
602 * work->data. These functions should only be called while the work is
603 * owned - ie. while the PENDING bit is set.
604 *
605 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
606 * corresponding to a work. Pool is available once the work has been
607 * queued anywhere after initialization until it is sync canceled. pwq is
608 * available only while the work item is queued.
609 *
610 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
611 * canceled. While being canceled, a work item may have its PENDING set
612 * but stay off timer and worklist for arbitrarily long and nobody should
613 * try to steal the PENDING bit.
614 */
615 static inline void set_work_data(struct work_struct *work, unsigned long data,
616 unsigned long flags)
617 {
618 WARN_ON_ONCE(!work_pending(work));
619 atomic_long_set(&work->data, data | flags | work_static(work));
620 }
621
622 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
623 unsigned long extra_flags)
624 {
625 set_work_data(work, (unsigned long)pwq,
626 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
627 }
628
629 static void set_work_pool_and_keep_pending(struct work_struct *work,
630 int pool_id)
631 {
632 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
633 WORK_STRUCT_PENDING);
634 }
635
636 static void set_work_pool_and_clear_pending(struct work_struct *work,
637 int pool_id)
638 {
639 /*
640 * The following wmb is paired with the implied mb in
641 * test_and_set_bit(PENDING) and ensures all updates to @work made
642 * here are visible to and precede any updates by the next PENDING
643 * owner.
644 */
645 smp_wmb();
646 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
647 /*
648 * The following mb guarantees that previous clear of a PENDING bit
649 * will not be reordered with any speculative LOADS or STORES from
650 * work->current_func, which is executed afterwards. This possible
651 * reordering can lead to a missed execution on attempt to queue
652 * the same @work. E.g. consider this case:
653 *
654 * CPU#0 CPU#1
655 * ---------------------------- --------------------------------
656 *
657 * 1 STORE event_indicated
658 * 2 queue_work_on() {
659 * 3 test_and_set_bit(PENDING)
660 * 4 } set_..._and_clear_pending() {
661 * 5 set_work_data() # clear bit
662 * 6 smp_mb()
663 * 7 work->current_func() {
664 * 8 LOAD event_indicated
665 * }
666 *
667 * Without an explicit full barrier speculative LOAD on line 8 can
668 * be executed before CPU#0 does STORE on line 1. If that happens,
669 * CPU#0 observes the PENDING bit is still set and new execution of
670 * a @work is not queued in the hope that CPU#1 will eventually
671 * finish the queued @work. Meanwhile CPU#1 does not see
672 * event_indicated is set, because speculative LOAD was executed
673 * before actual STORE.
674 */
675 smp_mb();
676 }
677
678 static void clear_work_data(struct work_struct *work)
679 {
680 smp_wmb(); /* see set_work_pool_and_clear_pending() */
681 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
682 }
683
684 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
685 {
686 unsigned long data = atomic_long_read(&work->data);
687
688 if (data & WORK_STRUCT_PWQ)
689 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
690 else
691 return NULL;
692 }
693
694 /**
695 * get_work_pool - return the worker_pool a given work was associated with
696 * @work: the work item of interest
697 *
698 * Pools are created and destroyed under wq_pool_mutex, and read access
699 * is allowed under the sched-RCU read lock. As such, this function should be
700 * called under wq_pool_mutex or with preemption disabled.
701 *
702 * All fields of the returned pool are accessible as long as the above
703 * mentioned locking is in effect. If the returned pool needs to be used
704 * beyond the critical section, the caller is responsible for ensuring the
705 * returned pool is and stays online.
706 *
707 * Return: The worker_pool @work was last associated with. %NULL if none.
708 */
709 static struct worker_pool *get_work_pool(struct work_struct *work)
710 {
711 unsigned long data = atomic_long_read(&work->data);
712 int pool_id;
713
714 assert_rcu_or_pool_mutex();
715
716 if (data & WORK_STRUCT_PWQ)
717 return ((struct pool_workqueue *)
718 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
719
720 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
721 if (pool_id == WORK_OFFQ_POOL_NONE)
722 return NULL;
723
724 return idr_find(&worker_pool_idr, pool_id);
725 }
726
727 /**
728 * get_work_pool_id - return the worker pool ID a given work is associated with
729 * @work: the work item of interest
730 *
731 * Return: The worker_pool ID @work was last associated with.
732 * %WORK_OFFQ_POOL_NONE if none.
733 */
734 static int get_work_pool_id(struct work_struct *work)
735 {
736 unsigned long data = atomic_long_read(&work->data);
737
738 if (data & WORK_STRUCT_PWQ)
739 return ((struct pool_workqueue *)
740 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
741
742 return data >> WORK_OFFQ_POOL_SHIFT;
743 }
744
745 static void mark_work_canceling(struct work_struct *work)
746 {
747 unsigned long pool_id = get_work_pool_id(work);
748
749 pool_id <<= WORK_OFFQ_POOL_SHIFT;
750 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
751 }
752
753 static bool work_is_canceling(struct work_struct *work)
754 {
755 unsigned long data = atomic_long_read(&work->data);
756
757 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
758 }
759
760 /*
761 * Policy functions. These define the policies on how the global worker
762 * pools are managed. Unless noted otherwise, these functions assume that
763 * they're being called with pool->lock held.
764 */
765
766 static bool __need_more_worker(struct worker_pool *pool)
767 {
768 return !atomic_read(&pool->nr_running);
769 }
770
771 /*
772 * Need to wake up a worker? Called from anything but currently
773 * running workers.
774 *
775 * Note that, because unbound workers never contribute to nr_running, this
776 * function will always return %true for unbound pools as long as the
777 * worklist isn't empty.
778 */
779 static bool need_more_worker(struct worker_pool *pool)
780 {
781 return !list_empty(&pool->worklist) && __need_more_worker(pool);
782 }
783
784 /* Can I start working? Called from busy but !running workers. */
785 static bool may_start_working(struct worker_pool *pool)
786 {
787 return pool->nr_idle;
788 }
789
790 /* Do I need to keep working? Called from currently running workers. */
791 static bool keep_working(struct worker_pool *pool)
792 {
793 return !list_empty(&pool->worklist) &&
794 atomic_read(&pool->nr_running) <= 1;
795 }
796
797 /* Do we need a new worker? Called from manager. */
798 static bool need_to_create_worker(struct worker_pool *pool)
799 {
800 return need_more_worker(pool) && !may_start_working(pool);
801 }
802
803 /* Do we have too many workers and should some go away? */
804 static bool too_many_workers(struct worker_pool *pool)
805 {
806 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
807 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
808 int nr_busy = pool->nr_workers - nr_idle;
809
810 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
811 }
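
/*
 * Worked example for too_many_workers(): with MAX_IDLE_WORKERS_RATIO == 4
 * and 20 busy workers, up to 6 idle workers are tolerated; once a 7th
 * goes idle, (7 - 2) * 4 >= 20 holds and idle workers start timing out.
 */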
812
813 /*
814 * Wake up functions.
815 */
816
817 /* Return the first idle worker. Safe with preemption disabled */
818 static struct worker *first_idle_worker(struct worker_pool *pool)
819 {
820 if (unlikely(list_empty(&pool->idle_list)))
821 return NULL;
822
823 return list_first_entry(&pool->idle_list, struct worker, entry);
824 }
825
826 /**
827 * wake_up_worker - wake up an idle worker
828 * @pool: worker pool to wake worker from
829 *
830 * Wake up the first idle worker of @pool.
831 *
832 * CONTEXT:
833 * spin_lock_irq(pool->lock).
834 */
835 static void wake_up_worker(struct worker_pool *pool)
836 {
837 struct worker *worker = first_idle_worker(pool);
838
839 if (likely(worker))
840 wake_up_process(worker->task);
841 }
842
843 /**
844 * wq_worker_waking_up - a worker is waking up
845 * @task: task waking up
846 * @cpu: CPU @task is waking up to
847 *
848 * This function is called during try_to_wake_up() when a worker is
849 * being awoken.
850 *
851 * CONTEXT:
852 * spin_lock_irq(rq->lock)
853 */
854 void wq_worker_waking_up(struct task_struct *task, int cpu)
855 {
856 struct worker *worker = kthread_data(task);
857
858 if (!(worker->flags & WORKER_NOT_RUNNING)) {
859 WARN_ON_ONCE(worker->pool->cpu != cpu);
860 atomic_inc(&worker->pool->nr_running);
861 }
862 }
863
864 /**
865 * wq_worker_sleeping - a worker is going to sleep
866 * @task: task going to sleep
867 *
868 * This function is called during schedule() when a busy worker is
869 * going to sleep. A worker on the same cpu can be woken up by
870 * returning a pointer to its task.
871 *
872 * CONTEXT:
873 * spin_lock_irq(rq->lock)
874 *
875 * Return:
876 * Worker task on @cpu to wake up, %NULL if none.
877 */
878 struct task_struct *wq_worker_sleeping(struct task_struct *task)
879 {
880 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
881 struct worker_pool *pool;
882
883 /*
884 * Rescuers, which may not have all the fields set up like normal
885 * workers, also reach here; let's not access anything before
886 * checking NOT_RUNNING.
887 */
888 if (worker->flags & WORKER_NOT_RUNNING)
889 return NULL;
890
891 pool = worker->pool;
892
893 /* this can only happen on the local cpu */
894 if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
895 return NULL;
896
897 /*
898 * The counterpart of the following dec_and_test, implied mb,
899 * worklist not empty test sequence is in insert_work().
900 * Please read comment there.
901 *
902 * NOT_RUNNING is clear. This means that we're bound to and
903 * running on the local cpu w/ rq lock held and preemption
904 * disabled, which in turn means that nobody else could be
905 * manipulating idle_list, so dereferencing idle_list without pool
906 * lock is safe.
907 */
908 if (atomic_dec_and_test(&pool->nr_running) &&
909 !list_empty(&pool->worklist))
910 to_wakeup = first_idle_worker(pool);
911 return to_wakeup ? to_wakeup->task : NULL;
912 }
913
914 /**
915 * wq_worker_last_func - retrieve worker's last work function
916 * @task: Task to retrieve last work function of.
 *
917 * Determine the last function a worker executed. This is called from
918 * the scheduler to get a worker's last known identity.
919 *
920 * CONTEXT:
921 * spin_lock_irq(rq->lock)
922 *
923 * Return:
924 * The last work function %current executed as a worker, NULL if it
925 * hasn't executed any work yet.
926 */
927 work_func_t wq_worker_last_func(struct task_struct *task)
928 {
929 struct worker *worker = kthread_data(task);
930
931 return worker->last_func;
932 }
933
934 /**
935 * worker_set_flags - set worker flags and adjust nr_running accordingly
936 * @worker: self
937 * @flags: flags to set
938 *
939 * Set @flags in @worker->flags and adjust nr_running accordingly.
940 *
941 * CONTEXT:
942 * spin_lock_irq(pool->lock)
943 */
944 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
945 {
946 struct worker_pool *pool = worker->pool;
947
948 WARN_ON_ONCE(worker->task != current);
949
950 /* If transitioning into NOT_RUNNING, adjust nr_running. */
951 if ((flags & WORKER_NOT_RUNNING) &&
952 !(worker->flags & WORKER_NOT_RUNNING)) {
953 atomic_dec(&pool->nr_running);
954 }
955
956 worker->flags |= flags;
957 }
958
959 /**
960 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
961 * @worker: self
962 * @flags: flags to clear
963 *
964 * Clear @flags in @worker->flags and adjust nr_running accordingly.
965 *
966 * CONTEXT:
967 * spin_lock_irq(pool->lock)
968 */
969 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
970 {
971 struct worker_pool *pool = worker->pool;
972 unsigned int oflags = worker->flags;
973
974 WARN_ON_ONCE(worker->task != current);
975
976 worker->flags &= ~flags;
977
978 /*
979 * If transitioning out of NOT_RUNNING, increment nr_running. Note
980 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
981 * of multiple flags, not a single flag.
982 */
983 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
984 if (!(worker->flags & WORKER_NOT_RUNNING))
985 atomic_inc(&pool->nr_running);
986 }
987
988 /**
989 * find_worker_executing_work - find worker which is executing a work
990 * @pool: pool of interest
991 * @work: work to find worker for
992 *
993 * Find a worker which is executing @work on @pool by searching
994 * @pool->busy_hash which is keyed by the address of @work. For a worker
995 * to match, its current execution should match the address of @work and
996 * its work function. This is to avoid unwanted dependency between
997 * unrelated work executions through a work item being recycled while still
998 * being executed.
999 *
1000 * This is a bit tricky. A work item may be freed once its execution
1001 * starts and nothing prevents the freed area from being recycled for
1002 * another work item. If the same work item address ends up being reused
1003 * before the original execution finishes, workqueue will identify the
1004 * recycled work item as currently executing and make it wait until the
1005 * current execution finishes, introducing an unwanted dependency.
1006 *
1007 * This function checks the work item address and work function to avoid
1008 * false positives. Note that this isn't complete as one may construct a
1009 * work function which can introduce dependency onto itself through a
1010 * recycled work item. Well, if somebody wants to shoot themselves in the
1011 * foot that badly, there's only so much we can do, and if such deadlock
1012 * actually occurs, it should be easy to locate the culprit work function.
1013 *
1014 * CONTEXT:
1015 * spin_lock_irq(pool->lock).
1016 *
1017 * Return:
1018 * Pointer to worker which is executing @work if found, %NULL
1019 * otherwise.
1020 */
1021 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1022 struct work_struct *work)
1023 {
1024 struct worker *worker;
1025
1026 hash_for_each_possible(pool->busy_hash, worker, hentry,
1027 (unsigned long)work)
1028 if (worker->current_work == work &&
1029 worker->current_func == work->func)
1030 return worker;
1031
1032 return NULL;
1033 }
1034
1035 /**
1036 * move_linked_works - move linked works to a list
1037 * @work: start of series of works to be scheduled
1038 * @head: target list to append @work to
1039 * @nextp: out parameter for nested worklist walking
1040 *
1041 * Schedule linked works starting from @work to @head. Work series to
1042 * be scheduled starts at @work and includes any consecutive work with
1043 * WORK_STRUCT_LINKED set in its predecessor.
1044 *
1045 * If @nextp is not NULL, it's updated to point to the next work of
1046 * the last scheduled work. This allows move_linked_works() to be
1047 * nested inside outer list_for_each_entry_safe().
1048 *
1049 * CONTEXT:
1050 * spin_lock_irq(pool->lock).
1051 */
1052 static void move_linked_works(struct work_struct *work, struct list_head *head,
1053 struct work_struct **nextp)
1054 {
1055 struct work_struct *n;
1056
1057 /*
1058 * Linked worklist will always end before the end of the list,
1059 * use NULL for list head.
1060 */
1061 list_for_each_entry_safe_from(work, n, NULL, entry) {
1062 list_move_tail(&work->entry, head);
1063 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1064 break;
1065 }
1066
1067 /*
1068 * If we're already inside safe list traversal and have moved
1069 * multiple works to the scheduled queue, the next position
1070 * needs to be updated.
1071 */
1072 if (nextp)
1073 *nextp = n;
1074 }
1075
1076 /**
1077 * get_pwq - get an extra reference on the specified pool_workqueue
1078 * @pwq: pool_workqueue to get
1079 *
1080 * Obtain an extra reference on @pwq. The caller should guarantee that
1081 * @pwq has positive refcnt and be holding the matching pool->lock.
1082 */
1083 static void get_pwq(struct pool_workqueue *pwq)
1084 {
1085 lockdep_assert_held(&pwq->pool->lock);
1086 WARN_ON_ONCE(pwq->refcnt <= 0);
1087 pwq->refcnt++;
1088 }
1089
1090 /**
1091 * put_pwq - put a pool_workqueue reference
1092 * @pwq: pool_workqueue to put
1093 *
1094 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1095 * destruction. The caller should be holding the matching pool->lock.
1096 */
1097 static void put_pwq(struct pool_workqueue *pwq)
1098 {
1099 lockdep_assert_held(&pwq->pool->lock);
1100 if (likely(--pwq->refcnt))
1101 return;
1102 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1103 return;
1104 /*
1105 * @pwq can't be released under pool->lock, bounce to
1106 * pwq_unbound_release_workfn(). This never recurses on the same
1107 * pool->lock as this path is taken only for unbound workqueues and
1108 * the release work item is scheduled on a per-cpu workqueue. To
1109 * avoid lockdep warning, unbound pool->locks are given lockdep
1110 * subclass of 1 in get_unbound_pool().
1111 */
1112 schedule_work(&pwq->unbound_release_work);
1113 }
1114
1115 /**
1116 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1117 * @pwq: pool_workqueue to put (can be %NULL)
1118 *
1119 * put_pwq() with locking. This function also allows %NULL @pwq.
1120 */
1121 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1122 {
1123 if (pwq) {
1124 /*
1125 * As both pwqs and pools are sched-RCU protected, the
1126 * following lock operations are safe.
1127 */
1128 spin_lock_irq(&pwq->pool->lock);
1129 put_pwq(pwq);
1130 spin_unlock_irq(&pwq->pool->lock);
1131 }
1132 }
1133
1134 static void pwq_activate_delayed_work(struct work_struct *work)
1135 {
1136 struct pool_workqueue *pwq = get_work_pwq(work);
1137
1138 trace_workqueue_activate_work(work);
1139 if (list_empty(&pwq->pool->worklist))
1140 pwq->pool->watchdog_ts = jiffies;
1141 move_linked_works(work, &pwq->pool->worklist, NULL);
1142 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1143 pwq->nr_active++;
1144 }
1145
1146 static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1147 {
1148 struct work_struct *work = list_first_entry(&pwq->delayed_works,
1149 struct work_struct, entry);
1150
1151 pwq_activate_delayed_work(work);
1152 }
1153
1154 /**
1155 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1156 * @pwq: pwq of interest
1157 * @color: color of work which left the queue
1158 *
1159 * A work either has completed or is removed from pending queue,
1160 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1161 *
1162 * CONTEXT:
1163 * spin_lock_irq(pool->lock).
1164 */
1165 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1166 {
1167 /* uncolored work items don't participate in flushing or nr_active */
1168 if (color == WORK_NO_COLOR)
1169 goto out_put;
1170
1171 pwq->nr_in_flight[color]--;
1172
1173 pwq->nr_active--;
1174 if (!list_empty(&pwq->delayed_works)) {
1175 /* one down, submit a delayed one */
1176 if (pwq->nr_active < pwq->max_active)
1177 pwq_activate_first_delayed(pwq);
1178 }
1179
1180 /* is flush in progress and are we at the flushing tip? */
1181 if (likely(pwq->flush_color != color))
1182 goto out_put;
1183
1184 /* are there still in-flight works? */
1185 if (pwq->nr_in_flight[color])
1186 goto out_put;
1187
1188 /* this pwq is done, clear flush_color */
1189 pwq->flush_color = -1;
1190
1191 /*
1192 * If this was the last pwq, wake up the first flusher. It
1193 * will handle the rest.
1194 */
1195 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1196 complete(&pwq->wq->first_flusher->done);
1197 out_put:
1198 put_pwq(pwq);
1199 }
1200
1201 /**
1202 * try_to_grab_pending - steal work item from worklist and disable irq
1203 * @work: work item to steal
1204 * @is_dwork: @work is a delayed_work
1205 * @flags: place to store irq state
1206 *
1207 * Try to grab PENDING bit of @work. This function can handle @work in any
1208 * stable state - idle, on timer or on worklist.
1209 *
1210 * Return:
1211 * 1 if @work was pending and we successfully stole PENDING
1212 * 0 if @work was idle and we claimed PENDING
1213 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1214 * -ENOENT if someone else is canceling @work, this state may persist
1215 * for arbitrarily long
1216 *
1217 * Note:
1218 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1219 * interrupted while holding PENDING and @work off queue, irq must be
1220 * disabled on entry. This, combined with delayed_work->timer being
1221 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
1222 *
1223 * On successful return, >= 0, irq is disabled and the caller is
1224 * responsible for releasing it using local_irq_restore(*@flags).
1225 *
1226 * This function is safe to call from any context including IRQ handler.
1227 */
1228 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1229 unsigned long *flags)
1230 {
1231 struct worker_pool *pool;
1232 struct pool_workqueue *pwq;
1233
1234 local_irq_save(*flags);
1235
1236 /* try to steal the timer if it exists */
1237 if (is_dwork) {
1238 struct delayed_work *dwork = to_delayed_work(work);
1239
1240 /*
1241 * dwork->timer is irqsafe. If del_timer() fails, it's
1242 * guaranteed that the timer is not queued anywhere and not
1243 * running on the local CPU.
1244 */
1245 if (likely(del_timer(&dwork->timer)))
1246 return 1;
1247 }
1248
1249 /* try to claim PENDING the normal way */
1250 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1251 return 0;
1252
1253 /*
1254 * The queueing is in progress, or it is already queued. Try to
1255 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1256 */
1257 pool = get_work_pool(work);
1258 if (!pool)
1259 goto fail;
1260
1261 spin_lock(&pool->lock);
1262 /*
1263 * work->data is guaranteed to point to pwq only while the work
1264 * item is queued on pwq->wq, and both updating work->data to point
1265 * to pwq on queueing and to pool on dequeueing are done under
1266 * pwq->pool->lock. This in turn guarantees that, if work->data
1267 * points to pwq which is associated with a locked pool, the work
1268 * item is currently queued on that pool.
1269 */
1270 pwq = get_work_pwq(work);
1271 if (pwq && pwq->pool == pool) {
1272 debug_work_deactivate(work);
1273
1274 /*
1275 * A delayed work item cannot be grabbed directly because
1276 * it might have linked NO_COLOR work items which, if left
1277 * on the delayed_list, will confuse pwq->nr_active
1278 * management later on and cause stall. Make sure the work
1279 * item is activated before grabbing.
1280 */
1281 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1282 pwq_activate_delayed_work(work);
1283
1284 list_del_init(&work->entry);
1285 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1286
1287 /* work->data points to pwq iff queued, point to pool */
1288 set_work_pool_and_keep_pending(work, pool->id);
1289
1290 spin_unlock(&pool->lock);
1291 return 1;
1292 }
1293 spin_unlock(&pool->lock);
1294 fail:
1295 local_irq_restore(*flags);
1296 if (work_is_canceling(work))
1297 return -ENOENT;
1298 cpu_relax();
1299 return -EAGAIN;
1300 }
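
/*
 * The usual caller pattern (illustrative) busy-retries only -EAGAIN and
 * treats -ENOENT and >= 0 as final:
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 * mod_delayed_work_on() below uses exactly this loop.
 */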
1301
1302 /**
1303 * insert_work - insert a work into a pool
1304 * @pwq: pwq @work belongs to
1305 * @work: work to insert
1306 * @head: insertion point
1307 * @extra_flags: extra WORK_STRUCT_* flags to set
1308 *
1309 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1310 * work_struct flags.
1311 *
1312 * CONTEXT:
1313 * spin_lock_irq(pool->lock).
1314 */
1315 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1316 struct list_head *head, unsigned int extra_flags)
1317 {
1318 struct worker_pool *pool = pwq->pool;
1319
1320 /* we own @work, set data and link */
1321 set_work_pwq(work, pwq, extra_flags);
1322 list_add_tail(&work->entry, head);
1323 get_pwq(pwq);
1324
1325 /*
1326 * Ensure either wq_worker_sleeping() sees the above
1327 * list_add_tail() or we see zero nr_running to avoid workers lying
1328 * around lazily while there are works to be processed.
1329 */
1330 smp_mb();
1331
1332 if (__need_more_worker(pool))
1333 wake_up_worker(pool);
1334 }
1335
1336 /*
1337 * Test whether @work is being queued from another work executing on the
1338 * same workqueue.
1339 */
1340 static bool is_chained_work(struct workqueue_struct *wq)
1341 {
1342 struct worker *worker;
1343
1344 worker = current_wq_worker();
1345 /*
1346 * Return %true iff I'm a worker executing a work item on @wq. If
1347 * I'm @worker, it's safe to dereference it without locking.
1348 */
1349 return worker && worker->current_pwq->wq == wq;
1350 }
1351
1352 /*
1353 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1354 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1355 * avoid perturbing sensitive tasks.
1356 */
1357 static int wq_select_unbound_cpu(int cpu)
1358 {
1359 static bool printed_dbg_warning;
1360 int new_cpu;
1361
1362 if (likely(!wq_debug_force_rr_cpu)) {
1363 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1364 return cpu;
1365 } else if (!printed_dbg_warning) {
1366 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1367 printed_dbg_warning = true;
1368 }
1369
1370 if (cpumask_empty(wq_unbound_cpumask))
1371 return cpu;
1372
1373 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1374 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1375 if (unlikely(new_cpu >= nr_cpu_ids)) {
1376 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1377 if (unlikely(new_cpu >= nr_cpu_ids))
1378 return cpu;
1379 }
1380 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1381
1382 return new_cpu;
1383 }
1384
1385 static void __queue_work(int cpu, struct workqueue_struct *wq,
1386 struct work_struct *work)
1387 {
1388 struct pool_workqueue *pwq;
1389 struct worker_pool *last_pool;
1390 struct list_head *worklist;
1391 unsigned int work_flags;
1392 unsigned int req_cpu = cpu;
1393
1394 /*
1395 * While a work item is PENDING && off queue, a task trying to
1396 * steal the PENDING will busy-loop waiting for it to either get
1397 * queued or lose PENDING. Grabbing PENDING and queueing should
1398 * happen with IRQ disabled.
1399 */
1400 WARN_ON_ONCE(!irqs_disabled());
1401
1402 debug_work_activate(work);
1403
1404 /* if draining, only works from the same workqueue are allowed */
1405 if (unlikely(wq->flags & __WQ_DRAINING) &&
1406 WARN_ON_ONCE(!is_chained_work(wq)))
1407 return;
1408 retry:
1409 /* pwq which will be used unless @work is executing elsewhere */
1410 if (wq->flags & WQ_UNBOUND) {
1411 if (req_cpu == WORK_CPU_UNBOUND)
1412 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1413 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1414 } else {
1415 if (req_cpu == WORK_CPU_UNBOUND)
1416 cpu = raw_smp_processor_id();
1417 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1418 }
1419
1420 /*
1421 * If @work was previously on a different pool, it might still be
1422 * running there, in which case the work needs to be queued on that
1423 * pool to guarantee non-reentrancy.
1424 */
1425 last_pool = get_work_pool(work);
1426 if (last_pool && last_pool != pwq->pool) {
1427 struct worker *worker;
1428
1429 spin_lock(&last_pool->lock);
1430
1431 worker = find_worker_executing_work(last_pool, work);
1432
1433 if (worker && worker->current_pwq->wq == wq) {
1434 pwq = worker->current_pwq;
1435 } else {
1436 /* meh... not running there, queue here */
1437 spin_unlock(&last_pool->lock);
1438 spin_lock(&pwq->pool->lock);
1439 }
1440 } else {
1441 spin_lock(&pwq->pool->lock);
1442 }
1443
1444 /*
1445 * pwq is determined and locked. For unbound pools, we could have
1446 * raced with pwq release and it could already be dead. If its
1447 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1448 * without another pwq replacing it in the numa_pwq_tbl or while
1449 * work items are executing on it, so the retrying is guaranteed to
1450 * make forward-progress.
1451 */
1452 if (unlikely(!pwq->refcnt)) {
1453 if (wq->flags & WQ_UNBOUND) {
1454 spin_unlock(&pwq->pool->lock);
1455 cpu_relax();
1456 goto retry;
1457 }
1458 /* oops */
1459 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1460 wq->name, cpu);
1461 }
1462
1463 /* pwq determined, queue */
1464 trace_workqueue_queue_work(req_cpu, pwq, work);
1465
1466 if (WARN_ON(!list_empty(&work->entry))) {
1467 spin_unlock(&pwq->pool->lock);
1468 return;
1469 }
1470
1471 pwq->nr_in_flight[pwq->work_color]++;
1472 work_flags = work_color_to_flags(pwq->work_color);
1473
1474 if (likely(pwq->nr_active < pwq->max_active)) {
1475 trace_workqueue_activate_work(work);
1476 pwq->nr_active++;
1477 worklist = &pwq->pool->worklist;
1478 if (list_empty(worklist))
1479 pwq->pool->watchdog_ts = jiffies;
1480 } else {
1481 work_flags |= WORK_STRUCT_DELAYED;
1482 worklist = &pwq->delayed_works;
1483 }
1484
1485 insert_work(pwq, work, worklist, work_flags);
1486
1487 spin_unlock(&pwq->pool->lock);
1488 }
1489
1490 /**
1491 * queue_work_on - queue work on specific cpu
1492 * @cpu: CPU number to execute work on
1493 * @wq: workqueue to use
1494 * @work: work to queue
1495 *
1496 * We queue the work to a specific CPU, the caller must ensure it
1497 * can't go away.
1498 *
1499 * Return: %false if @work was already on a queue, %true otherwise.
1500 */
1501 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1502 struct work_struct *work)
1503 {
1504 bool ret = false;
1505 unsigned long flags;
1506
1507 local_irq_save(flags);
1508
1509 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1510 __queue_work(cpu, wq, work);
1511 ret = true;
1512 }
1513
1514 local_irq_restore(flags);
1515 return ret;
1516 }
1517 EXPORT_SYMBOL(queue_work_on);
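
/*
 * Illustrative only: a caller that must touch CPU-local state of a
 * particular CPU can pin the work item there (while ensuring the CPU
 * stays online, e.g. under get_online_cpus()):
 *
 *	queue_work_on(target_cpu, system_wq, &my_work);
 *
 * Callers that don't care use queue_work()/schedule_work(), which pass
 * WORK_CPU_UNBOUND and let __queue_work() pick the CPU.
 */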
1518
1519 void delayed_work_timer_fn(unsigned long __data)
1520 {
1521 struct delayed_work *dwork = (struct delayed_work *)__data;
1522
1523 /* should have been called from irqsafe timer with irq already off */
1524 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1525 }
1526 EXPORT_SYMBOL(delayed_work_timer_fn);
1527
1528 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1529 struct delayed_work *dwork, unsigned long delay)
1530 {
1531 struct timer_list *timer = &dwork->timer;
1532 struct work_struct *work = &dwork->work;
1533
1534 WARN_ON_ONCE(!wq);
1535 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1536 timer->data != (unsigned long)dwork);
1537 WARN_ON_ONCE(timer_pending(timer));
1538 WARN_ON_ONCE(!list_empty(&work->entry));
1539
1540 /*
1541 * If @delay is 0, queue @dwork->work immediately. This is for
1542 * both optimization and correctness. The earliest @timer can
1543 * expire is on the closest next tick, and delayed_work users depend
1544 * on there being no such delay when @delay is 0.
1545 */
1546 if (!delay) {
1547 __queue_work(cpu, wq, &dwork->work);
1548 return;
1549 }
1550
1551 dwork->wq = wq;
1552 dwork->cpu = cpu;
1553 timer->expires = jiffies + delay;
1554
1555 if (unlikely(cpu != WORK_CPU_UNBOUND))
1556 add_timer_on(timer, cpu);
1557 else
1558 add_timer(timer);
1559 }
1560
1561 /**
1562 * queue_delayed_work_on - queue work on specific CPU after delay
1563 * @cpu: CPU number to execute work on
1564 * @wq: workqueue to use
1565 * @dwork: work to queue
1566 * @delay: number of jiffies to wait before queueing
1567 *
1568 * Return: %false if @work was already on a queue, %true otherwise. If
1569 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1570 * execution.
1571 */
1572 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1573 struct delayed_work *dwork, unsigned long delay)
1574 {
1575 struct work_struct *work = &dwork->work;
1576 bool ret = false;
1577 unsigned long flags;
1578
1579 /* read the comment in __queue_work() */
1580 local_irq_save(flags);
1581
1582 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1583 __queue_delayed_work(cpu, wq, dwork, delay);
1584 ret = true;
1585 }
1586
1587 local_irq_restore(flags);
1588 return ret;
1589 }
1590 EXPORT_SYMBOL(queue_delayed_work_on);
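
/*
 * Illustrative only: the common way to arm a delayed work roughly 100ms
 * out without caring about the CPU:
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_dwork_fn);
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * queue_delayed_work() is the WORK_CPU_UNBOUND wrapper around this
 * function.
 */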
1591
1592 /**
1593 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1594 * @cpu: CPU number to execute work on
1595 * @wq: workqueue to use
1596 * @dwork: work to queue
1597 * @delay: number of jiffies to wait before queueing
1598 *
1599 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1600 * modify @dwork's timer so that it expires after @delay. If @delay is
1601 * zero, @work is guaranteed to be scheduled immediately regardless of its
1602 * current state.
1603 *
1604 * Return: %false if @dwork was idle and queued, %true if @dwork was
1605 * pending and its timer was modified.
1606 *
1607 * This function is safe to call from any context including IRQ handler.
1608 * See try_to_grab_pending() for details.
1609 */
1610 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1611 struct delayed_work *dwork, unsigned long delay)
1612 {
1613 unsigned long flags;
1614 int ret;
1615
1616 do {
1617 ret = try_to_grab_pending(&dwork->work, true, &flags);
1618 } while (unlikely(ret == -EAGAIN));
1619
1620 if (likely(ret >= 0)) {
1621 __queue_delayed_work(cpu, wq, dwork, delay);
1622 local_irq_restore(flags);
1623 }
1624
1625 /* -ENOENT from try_to_grab_pending() becomes %true */
1626 return ret;
1627 }
1628 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
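/*
 * mod_delayed_work_on() is commonly used for debouncing: every call pushes
 * the expiry out, so the handler runs only once the caller has been quiet
 * for the whole delay. A minimal sketch with a hypothetical my_dwork:
 *
 *	void my_event_notify(void)
 *	{
 *		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *				    msecs_to_jiffies(50));
 *	}
 *
 * Unlike queue_delayed_work_on(), the timer is re-armed even if @dwork is
 * already pending, and the call is safe from IRQ context.
 */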
1629
1630 /**
1631 * worker_enter_idle - enter idle state
1632 * @worker: worker which is entering idle state
1633 *
1634 * @worker is entering idle state. Update stats and idle timer if
1635 * necessary.
1636 *
1637 * LOCKING:
1638 * spin_lock_irq(pool->lock).
1639 */
1640 static void worker_enter_idle(struct worker *worker)
1641 {
1642 struct worker_pool *pool = worker->pool;
1643
1644 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1645 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1646 (worker->hentry.next || worker->hentry.pprev)))
1647 return;
1648
1649 /* can't use worker_set_flags(), also called from create_worker() */
1650 worker->flags |= WORKER_IDLE;
1651 pool->nr_idle++;
1652 worker->last_active = jiffies;
1653
1654 /* idle_list is LIFO */
1655 list_add(&worker->entry, &pool->idle_list);
1656
1657 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1658 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1659
1660 /*
1661 * Sanity check nr_running. Because wq_unbind_fn() releases
1662 * pool->lock between setting %WORKER_UNBOUND and zapping
1663 * nr_running, the warning may trigger spuriously. Check iff
1664 * unbind is not in progress.
1665 */
1666 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1667 pool->nr_workers == pool->nr_idle &&
1668 atomic_read(&pool->nr_running));
1669 }
1670
1671 /**
1672 * worker_leave_idle - leave idle state
1673 * @worker: worker which is leaving idle state
1674 *
1675 * @worker is leaving idle state. Update stats.
1676 *
1677 * LOCKING:
1678 * spin_lock_irq(pool->lock).
1679 */
1680 static void worker_leave_idle(struct worker *worker)
1681 {
1682 struct worker_pool *pool = worker->pool;
1683
1684 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1685 return;
1686 worker_clr_flags(worker, WORKER_IDLE);
1687 pool->nr_idle--;
1688 list_del_init(&worker->entry);
1689 }
1690
1691 static struct worker *alloc_worker(int node)
1692 {
1693 struct worker *worker;
1694
1695 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1696 if (worker) {
1697 INIT_LIST_HEAD(&worker->entry);
1698 INIT_LIST_HEAD(&worker->scheduled);
1699 INIT_LIST_HEAD(&worker->node);
1700 /* on creation a worker is in !idle && prep state */
1701 worker->flags = WORKER_PREP;
1702 }
1703 return worker;
1704 }
1705
1706 /**
1707 * worker_attach_to_pool() - attach a worker to a pool
1708 * @worker: worker to be attached
1709 * @pool: the target pool
1710 *
1711 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1712 * cpu-binding of @worker are kept coordinated with the pool across
1713 * cpu-[un]hotplugs.
1714 */
1715 static void worker_attach_to_pool(struct worker *worker,
1716 struct worker_pool *pool)
1717 {
1718 mutex_lock(&pool->attach_mutex);
1719
1720 /*
1721 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1722 * online CPUs. It'll be re-applied when any of the CPUs come up.
1723 */
1724 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1725
1726 /*
1727 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1728 * stable across this function. See the comments above the
1729 * flag definition for details.
1730 */
1731 if (pool->flags & POOL_DISASSOCIATED)
1732 worker->flags |= WORKER_UNBOUND;
1733
1734 list_add_tail(&worker->node, &pool->workers);
1735
1736 mutex_unlock(&pool->attach_mutex);
1737 }
1738
1739 /**
1740 * worker_detach_from_pool() - detach a worker from its pool
1741 * @worker: worker which is attached to its pool
1742 * @pool: the pool @worker is attached to
1743 *
1744 * Undo the attaching which had been done in worker_attach_to_pool(). The
1745 * caller worker shouldn't access the pool after detaching unless it has
1746 * another reference to the pool.
1747 */
1748 static void worker_detach_from_pool(struct worker *worker,
1749 struct worker_pool *pool)
1750 {
1751 struct completion *detach_completion = NULL;
1752
1753 mutex_lock(&pool->attach_mutex);
1754 list_del(&worker->node);
1755 if (list_empty(&pool->workers))
1756 detach_completion = pool->detach_completion;
1757 mutex_unlock(&pool->attach_mutex);
1758
1759 /* clear leftover flags without pool->lock after it is detached */
1760 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1761
1762 if (detach_completion)
1763 complete(detach_completion);
1764 }
1765
1766 /**
1767 * create_worker - create a new workqueue worker
1768 * @pool: pool the new worker will belong to
1769 *
1770 * Create and start a new worker which is attached to @pool.
1771 *
1772 * CONTEXT:
1773 * Might sleep. Does GFP_KERNEL allocations.
1774 *
1775 * Return:
1776 * Pointer to the newly created worker, or %NULL on failure.
1777 */
1778 static struct worker *create_worker(struct worker_pool *pool)
1779 {
1780 struct worker *worker = NULL;
1781 int id = -1;
1782 char id_buf[16];
1783
1784 /* ID is needed to determine kthread name */
1785 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1786 if (id < 0)
1787 goto fail;
1788
1789 worker = alloc_worker(pool->node);
1790 if (!worker)
1791 goto fail;
1792
1793 worker->pool = pool;
1794 worker->id = id;
1795
1796 if (pool->cpu >= 0)
1797 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1798 pool->attrs->nice < 0 ? "H" : "");
1799 else
1800 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1801
1802 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1803 "kworker/%s", id_buf);
1804 if (IS_ERR(worker->task))
1805 goto fail;
1806
1807 set_user_nice(worker->task, pool->attrs->nice);
1808 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1809
1810 /* successful, attach the worker to the pool */
1811 worker_attach_to_pool(worker, pool);
1812
1813 /* start the newly created worker */
1814 spin_lock_irq(&pool->lock);
1815 worker->pool->nr_workers++;
1816 worker_enter_idle(worker);
1817 wake_up_process(worker->task);
1818 spin_unlock_irq(&pool->lock);
1819
1820 return worker;
1821
1822 fail:
1823 if (id >= 0)
1824 ida_simple_remove(&pool->worker_ida, id);
1825 kfree(worker);
1826 return NULL;
1827 }
1828
1829 /**
1830 * destroy_worker - destroy a workqueue worker
1831 * @worker: worker to be destroyed
1832 *
1833 * Destroy @worker and adjust @pool stats accordingly. The worker should
1834 * be idle.
1835 *
1836 * CONTEXT:
1837 * spin_lock_irq(pool->lock).
1838 */
1839 static void destroy_worker(struct worker *worker)
1840 {
1841 struct worker_pool *pool = worker->pool;
1842
1843 lockdep_assert_held(&pool->lock);
1844
1845 /* sanity check frenzy */
1846 if (WARN_ON(worker->current_work) ||
1847 WARN_ON(!list_empty(&worker->scheduled)) ||
1848 WARN_ON(!(worker->flags & WORKER_IDLE)))
1849 return;
1850
1851 pool->nr_workers--;
1852 pool->nr_idle--;
1853
1854 list_del_init(&worker->entry);
1855 worker->flags |= WORKER_DIE;
1856 wake_up_process(worker->task);
1857 }
1858
1859 static void idle_worker_timeout(unsigned long __pool)
1860 {
1861 struct worker_pool *pool = (void *)__pool;
1862
1863 spin_lock_irq(&pool->lock);
1864
1865 while (too_many_workers(pool)) {
1866 struct worker *worker;
1867 unsigned long expires;
1868
1869 /* idle_list is kept in LIFO order, check the last one */
1870 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1871 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1872
1873 if (time_before(jiffies, expires)) {
1874 mod_timer(&pool->idle_timer, expires);
1875 break;
1876 }
1877
1878 destroy_worker(worker);
1879 }
1880
1881 spin_unlock_irq(&pool->lock);
1882 }
1883
1884 static void send_mayday(struct work_struct *work)
1885 {
1886 struct pool_workqueue *pwq = get_work_pwq(work);
1887 struct workqueue_struct *wq = pwq->wq;
1888
1889 lockdep_assert_held(&wq_mayday_lock);
1890
1891 if (!wq->rescuer)
1892 return;
1893
1894 /* mayday mayday mayday */
1895 if (list_empty(&pwq->mayday_node)) {
1896 /*
1897 * If @pwq is for an unbound wq, its base ref may be put at
1898 * any time due to an attribute change. Pin @pwq until the
1899 * rescuer is done with it.
1900 */
1901 get_pwq(pwq);
1902 list_add_tail(&pwq->mayday_node, &wq->maydays);
1903 wake_up_process(wq->rescuer->task);
1904 }
1905 }
1906
1907 static void pool_mayday_timeout(unsigned long __pool)
1908 {
1909 struct worker_pool *pool = (void *)__pool;
1910 struct work_struct *work;
1911
1912 spin_lock_irq(&pool->lock);
1913 spin_lock(&wq_mayday_lock); /* for wq->maydays */
1914
1915 if (need_to_create_worker(pool)) {
1916 /*
1917 * We've been trying to create a new worker but
1918 * haven't been successful. We might be hitting an
1919 * allocation deadlock. Send distress signals to
1920 * rescuers.
1921 */
1922 list_for_each_entry(work, &pool->worklist, entry)
1923 send_mayday(work);
1924 }
1925
1926 spin_unlock(&wq_mayday_lock);
1927 spin_unlock_irq(&pool->lock);
1928
1929 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1930 }
1931
1932 /**
1933 * maybe_create_worker - create a new worker if necessary
1934 * @pool: pool to create a new worker for
1935 *
1936 * Create a new worker for @pool if necessary. @pool is guaranteed to
1937 * have at least one idle worker on return from this function. If
1938 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1939 * sent to all rescuers with works scheduled on @pool to resolve
1940 * possible allocation deadlock.
1941 *
1942 * On return, need_to_create_worker() is guaranteed to be %false and
1943 * may_start_working() %true.
1944 *
1945 * LOCKING:
1946 * spin_lock_irq(pool->lock) which may be released and regrabbed
1947 * multiple times. Does GFP_KERNEL allocations. Called only from
1948 * manager.
1949 */
1950 static void maybe_create_worker(struct worker_pool *pool)
1951 __releases(&pool->lock)
1952 __acquires(&pool->lock)
1953 {
1954 restart:
1955 spin_unlock_irq(&pool->lock);
1956
1957 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1958 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1959
1960 while (true) {
1961 if (create_worker(pool) || !need_to_create_worker(pool))
1962 break;
1963
1964 schedule_timeout_interruptible(CREATE_COOLDOWN);
1965
1966 if (!need_to_create_worker(pool))
1967 break;
1968 }
1969
1970 del_timer_sync(&pool->mayday_timer);
1971 spin_lock_irq(&pool->lock);
1972 /*
1973 * This is necessary even after a new worker was just successfully
1974 * created as @pool->lock was dropped and the new worker might have
1975 * already become busy.
1976 */
1977 if (need_to_create_worker(pool))
1978 goto restart;
1979 }
1980
1981 /**
1982 * manage_workers - manage worker pool
1983 * @worker: self
1984 *
1985 * Assume the manager role and manage the worker pool @worker belongs
1986 * to. At any given time, there can be only zero or one manager per
1987 * pool. The exclusion is handled automatically by this function.
1988 *
1989 * The caller can safely start processing works on false return. On
1990 * true return, it's guaranteed that need_to_create_worker() is false
1991 * and may_start_working() is true.
1992 *
1993 * CONTEXT:
1994 * spin_lock_irq(pool->lock) which may be released and regrabbed
1995 * multiple times. Does GFP_KERNEL allocations.
1996 *
1997 * Return:
1998 * %false if the pool doesn't need management and the caller can safely
1999 * start processing works, %true if management function was performed and
2000 * the conditions that the caller verified before calling the function may
2001 * no longer be true.
2002 */
2003 static bool manage_workers(struct worker *worker)
2004 {
2005 struct worker_pool *pool = worker->pool;
2006
2007 if (pool->flags & POOL_MANAGER_ACTIVE)
2008 return false;
2009
2010 pool->flags |= POOL_MANAGER_ACTIVE;
2011 pool->manager = worker;
2012
2013 maybe_create_worker(pool);
2014
2015 pool->manager = NULL;
2016 pool->flags &= ~POOL_MANAGER_ACTIVE;
2017 wake_up(&wq_manager_wait);
2018 return true;
2019 }
2020
2021 /**
2022 * process_one_work - process single work
2023 * @worker: self
2024 * @work: work to process
2025 *
2026 * Process @work. This function contains all the logic necessary to
2027 * process a single work item including synchronization against and
2028 * interaction with other workers on the same cpu, queueing and
2029 * flushing. As long as the context requirement is met, any worker can
2030 * call this function to process a work item.
2031 *
2032 * CONTEXT:
2033 * spin_lock_irq(pool->lock) which is released and regrabbed.
2034 */
2035 static void process_one_work(struct worker *worker, struct work_struct *work)
2036 __releases(&pool->lock)
2037 __acquires(&pool->lock)
2038 {
2039 struct pool_workqueue *pwq = get_work_pwq(work);
2040 struct worker_pool *pool = worker->pool;
2041 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2042 int work_color;
2043 struct worker *collision;
2044 #ifdef CONFIG_LOCKDEP
2045 /*
2046 * It is permissible to free the struct work_struct from
2047 * inside the function that is called from it, this we need to
2048 * take into account for lockdep too. To avoid bogus "held
2049 * lock freed" warnings as well as problems when looking into
2050 * work->lockdep_map, make a copy and use that here.
2051 */
2052 struct lockdep_map lockdep_map;
2053
2054 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2055 #endif
2056 /* ensure we're on the correct CPU */
2057 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2058 raw_smp_processor_id() != pool->cpu);
2059
2060 /*
2061 * A single work shouldn't be executed concurrently by
2062 * multiple workers on a single cpu. Check whether anyone is
2063 * already processing the work. If so, defer the work to the
2064 * currently executing one.
2065 */
2066 collision = find_worker_executing_work(pool, work);
2067 if (unlikely(collision)) {
2068 move_linked_works(work, &collision->scheduled, NULL);
2069 return;
2070 }
2071
2072 /* claim and dequeue */
2073 debug_work_deactivate(work);
2074 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2075 worker->current_work = work;
2076 worker->current_func = work->func;
2077 worker->current_pwq = pwq;
2078 work_color = get_work_color(work);
2079
2080 list_del_init(&work->entry);
2081
2082 /*
2083 * CPU intensive works don't participate in concurrency management.
2084 * They're the scheduler's responsibility. This takes @worker out
2085 * of concurrency management and the next code block will chain
2086 * execution of the pending work items.
2087 */
2088 if (unlikely(cpu_intensive))
2089 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2090
2091 /*
2092 * Wake up another worker if necessary. The condition is always
2093 * false for normal per-cpu workers since nr_running would always
2094 * be >= 1 at this point. This is used to chain execution of the
2095 * pending work items for WORKER_NOT_RUNNING workers such as the
2096 * UNBOUND and CPU_INTENSIVE ones.
2097 */
2098 if (need_more_worker(pool))
2099 wake_up_worker(pool);
2100
2101 /*
2102 * Record the last pool and clear PENDING which should be the last
2103 * update to @work. Also, do this inside @pool->lock so that
2104 * PENDING and queued state changes happen together while IRQ is
2105 * disabled.
2106 */
2107 set_work_pool_and_clear_pending(work, pool->id);
2108
2109 spin_unlock_irq(&pool->lock);
2110
2111 lock_map_acquire(&pwq->wq->lockdep_map);
2112 lock_map_acquire(&lockdep_map);
2113 /*
2114 * Strictly speaking we should mark the invariant state without holding
2115 * any locks, that is, before these two lock_map_acquire()'s.
2116 *
2117 * However, that would result in:
2118 *
2119 * A(W1)
2120 * WFC(C)
2121 * A(W1)
2122 * C(C)
2123 *
2124 * Which would create W1->C->W1 dependencies, even though there is no
2125 * actual deadlock possible. There are two solutions, using a
2126 * read-recursive acquire on the work(queue) 'locks', but this will then
2127 * hit the lockdep limitation on recursive locks, or simply discard
2128 * these locks.
2129 *
2130 * AFAICT there is no possible deadlock scenario between the
2131 * flush_work() and complete() primitives (except for single-threaded
2132 * workqueues), so hiding them isn't a problem.
2133 */
2134 lockdep_invariant_state(true);
2135 trace_workqueue_execute_start(work);
2136 worker->current_func(work);
2137 /*
2138 * While we must be careful to not use "work" after this, the trace
2139 * point will only record its address.
2140 */
2141 trace_workqueue_execute_end(work);
2142 lock_map_release(&lockdep_map);
2143 lock_map_release(&pwq->wq->lockdep_map);
2144
2145 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2146 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2147 " last function: %pf\n",
2148 current->comm, preempt_count(), task_pid_nr(current),
2149 worker->current_func);
2150 debug_show_held_locks(current);
2151 dump_stack();
2152 }
2153
2154 /*
2155 * The following prevents a kworker from hogging CPU on !PREEMPT
2156 * kernels, where a requeueing work item waiting for something to
2157 * happen could deadlock with stop_machine as such work item could
2158 * indefinitely requeue itself while all other CPUs are trapped in
2159 * stop_machine. At the same time, report a quiescent RCU state so
2160 * the same condition doesn't freeze RCU.
2161 */
2162 cond_resched_rcu_qs();
2163
2164 spin_lock_irq(&pool->lock);
2165
2166 /* clear cpu intensive status */
2167 if (unlikely(cpu_intensive))
2168 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2169
2170 /* tag the worker for identification in schedule() */
2171 worker->last_func = worker->current_func;
2172
2173 /* we're done with it, release */
2174 hash_del(&worker->hentry);
2175 worker->current_work = NULL;
2176 worker->current_func = NULL;
2177 worker->current_pwq = NULL;
2178 worker->desc_valid = false;
2179 pwq_dec_nr_in_flight(pwq, work_color);
2180 }
2181
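/*
 * The WQ_CPU_INTENSIVE handling above is selected when the workqueue is
 * allocated. A minimal sketch (hypothetical name):
 *
 *	struct workqueue_struct *crunch_wq;
 *
 *	crunch_wq = alloc_workqueue("my_crunch_wq", WQ_CPU_INTENSIVE, 0);
 *	if (!crunch_wq)
 *		return -ENOMEM;
 *
 * Items queued on crunch_wq may burn CPU for long stretches without
 * stalling other per-cpu work items: they are excluded from concurrency
 * management and left to the scheduler to arbitrate.
 */
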
2182 /**
2183 * process_scheduled_works - process scheduled works
2184 * @worker: self
2185 *
2186 * Process all scheduled works. Please note that the scheduled list
2187 * may change while processing a work, so this function repeatedly
2188 * fetches a work from the top and executes it.
2189 *
2190 * CONTEXT:
2191 * spin_lock_irq(pool->lock) which may be released and regrabbed
2192 * multiple times.
2193 */
2194 static void process_scheduled_works(struct worker *worker)
2195 {
2196 while (!list_empty(&worker->scheduled)) {
2197 struct work_struct *work = list_first_entry(&worker->scheduled,
2198 struct work_struct, entry);
2199 process_one_work(worker, work);
2200 }
2201 }
2202
2203 /**
2204 * worker_thread - the worker thread function
2205 * @__worker: self
2206 *
2207 * The worker thread function. All workers belong to a worker_pool -
2208 * either a per-cpu one or dynamic unbound one. These workers process all
2209 * work items regardless of their specific target workqueue. The only
2210 * exception is work items which belong to workqueues with a rescuer which
2211 * will be explained in rescuer_thread().
2212 *
2213 * Return: 0
2214 */
2215 static int worker_thread(void *__worker)
2216 {
2217 struct worker *worker = __worker;
2218 struct worker_pool *pool = worker->pool;
2219
2220 /* tell the scheduler that this is a workqueue worker */
2221 worker->task->flags |= PF_WQ_WORKER;
2222 woke_up:
2223 spin_lock_irq(&pool->lock);
2224
2225 /* am I supposed to die? */
2226 if (unlikely(worker->flags & WORKER_DIE)) {
2227 spin_unlock_irq(&pool->lock);
2228 WARN_ON_ONCE(!list_empty(&worker->entry));
2229 worker->task->flags &= ~PF_WQ_WORKER;
2230
2231 set_task_comm(worker->task, "kworker/dying");
2232 ida_simple_remove(&pool->worker_ida, worker->id);
2233 worker_detach_from_pool(worker, pool);
2234 kfree(worker);
2235 return 0;
2236 }
2237
2238 worker_leave_idle(worker);
2239 recheck:
2240 /* no more worker necessary? */
2241 if (!need_more_worker(pool))
2242 goto sleep;
2243
2244 /* do we need to manage? */
2245 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2246 goto recheck;
2247
2248 /*
2249 * ->scheduled list can only be filled while a worker is
2250 * preparing to process a work or actually processing it.
2251 * Make sure nobody diddled with it while I was sleeping.
2252 */
2253 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2254
2255 /*
2256 * Finish PREP stage. We're guaranteed to have at least one idle
2257 * worker or that someone else has already assumed the manager
2258 * role. This is where @worker starts participating in concurrency
2259 * management if applicable and concurrency management is restored
2260 * after being rebound. See rebind_workers() for details.
2261 */
2262 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2263
2264 do {
2265 struct work_struct *work =
2266 list_first_entry(&pool->worklist,
2267 struct work_struct, entry);
2268
2269 pool->watchdog_ts = jiffies;
2270
2271 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2272 /* optimization path, not strictly necessary */
2273 process_one_work(worker, work);
2274 if (unlikely(!list_empty(&worker->scheduled)))
2275 process_scheduled_works(worker);
2276 } else {
2277 move_linked_works(work, &worker->scheduled, NULL);
2278 process_scheduled_works(worker);
2279 }
2280 } while (keep_working(pool));
2281
2282 worker_set_flags(worker, WORKER_PREP);
2283 sleep:
2284 /*
2285 * pool->lock is held and there's no work to process and no need to
2286 * manage, sleep. Workers are woken up only while holding
2287 * pool->lock or from local cpu, so setting the current state
2288 * before releasing pool->lock is enough to prevent losing any
2289 * event.
2290 */
2291 worker_enter_idle(worker);
2292 __set_current_state(TASK_IDLE);
2293 spin_unlock_irq(&pool->lock);
2294 schedule();
2295 goto woke_up;
2296 }
2297
2298 /**
2299 * rescuer_thread - the rescuer thread function
2300 * @__rescuer: self
2301 *
2302 * Workqueue rescuer thread function. There's one rescuer for each
2303 * workqueue which has WQ_MEM_RECLAIM set.
2304 *
2305 * Regular work processing on a pool may block trying to create a new
2306 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2307 * developing into a deadlock if some work items currently on the same queue
2308 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2309 * the problem the rescuer solves.
2310 *
2311 * When such a condition is possible, the pool summons the rescuers of all
2312 * workqueues which have work items queued on the pool and lets them process
2313 * those items so that forward progress can be guaranteed.
2314 *
2315 * This should happen rarely.
2316 *
2317 * Return: 0
2318 */
2319 static int rescuer_thread(void *__rescuer)
2320 {
2321 struct worker *rescuer = __rescuer;
2322 struct workqueue_struct *wq = rescuer->rescue_wq;
2323 struct list_head *scheduled = &rescuer->scheduled;
2324 bool should_stop;
2325
2326 set_user_nice(current, RESCUER_NICE_LEVEL);
2327
2328 /*
2329 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2330 * doesn't participate in concurrency management.
2331 */
2332 rescuer->task->flags |= PF_WQ_WORKER;
2333 repeat:
2334 set_current_state(TASK_IDLE);
2335
2336 /*
2337 * By the time the rescuer is requested to stop, the workqueue
2338 * shouldn't have any work pending, but @wq->maydays may still have
2339 * pwq(s) queued. This can happen if non-rescuer workers consume
2340 * all the work items before the rescuer gets to them. Go through
2341 * @wq->maydays processing before acting on should_stop so that the
2342 * list is always empty on exit.
2343 */
2344 should_stop = kthread_should_stop();
2345
2346 /* see whether any pwq is asking for help */
2347 spin_lock_irq(&wq_mayday_lock);
2348
2349 while (!list_empty(&wq->maydays)) {
2350 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2351 struct pool_workqueue, mayday_node);
2352 struct worker_pool *pool = pwq->pool;
2353 struct work_struct *work, *n;
2354 bool first = true;
2355
2356 __set_current_state(TASK_RUNNING);
2357 list_del_init(&pwq->mayday_node);
2358
2359 spin_unlock_irq(&wq_mayday_lock);
2360
2361 worker_attach_to_pool(rescuer, pool);
2362
2363 spin_lock_irq(&pool->lock);
2364 rescuer->pool = pool;
2365
2366 /*
2367 * Slurp in all works issued via this workqueue and
2368 * process'em.
2369 */
2370 WARN_ON_ONCE(!list_empty(scheduled));
2371 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2372 if (get_work_pwq(work) == pwq) {
2373 if (first)
2374 pool->watchdog_ts = jiffies;
2375 move_linked_works(work, scheduled, &n);
2376 }
2377 first = false;
2378 }
2379
2380 if (!list_empty(scheduled)) {
2381 process_scheduled_works(rescuer);
2382
2383 /*
2384 * The above execution of rescued work items could
2385 * have created more to rescue through
2386 * pwq_activate_first_delayed() or chained
2387 * queueing. Let's put @pwq back on the mayday list so
2388 * that such back-to-back work items, which may be
2389 * being used to relieve memory pressure, don't
2390 * incur MAYDAY_INTERVAL delay in between.
2391 */
2392 if (need_to_create_worker(pool)) {
2393 spin_lock(&wq_mayday_lock);
2394 /*
2395 * Queue iff we aren't racing destruction
2396 * and somebody else hasn't queued it already.
2397 */
2398 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2399 get_pwq(pwq);
2400 list_add_tail(&pwq->mayday_node, &wq->maydays);
2401 }
2402 spin_unlock(&wq_mayday_lock);
2403 }
2404 }
2405
2406 /*
2407 * Put the reference grabbed by send_mayday(). @pool won't
2408 * go away while we're still attached to it.
2409 */
2410 put_pwq(pwq);
2411
2412 /*
2413 * Leave this pool. If need_more_worker() is %true, notify a
2414 * regular worker; otherwise, we end up with 0 concurrency
2415 * and stalling the execution.
2416 */
2417 if (need_more_worker(pool))
2418 wake_up_worker(pool);
2419
2420 rescuer->pool = NULL;
2421 spin_unlock_irq(&pool->lock);
2422
2423 worker_detach_from_pool(rescuer, pool);
2424
2425 spin_lock_irq(&wq_mayday_lock);
2426 }
2427
2428 spin_unlock_irq(&wq_mayday_lock);
2429
2430 if (should_stop) {
2431 __set_current_state(TASK_RUNNING);
2432 rescuer->task->flags &= ~PF_WQ_WORKER;
2433 return 0;
2434 }
2435
2436 /* rescuers should never participate in concurrency management */
2437 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2438 schedule();
2439 goto repeat;
2440 }
2441
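/*
 * Whether a workqueue has a rescuer at all is decided at allocation time by
 * the WQ_MEM_RECLAIM flag. A minimal sketch (hypothetical name):
 *
 *	struct workqueue_struct *reclaim_wq;
 *
 *	reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *	if (!reclaim_wq)
 *		return -ENOMEM;
 *
 * Work items that can sit anywhere in the memory-reclaim path belong on
 * such a workqueue so that the rescuer can guarantee forward progress even
 * when worker creation is stuck in allocation.
 */
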
2442 /**
2443 * check_flush_dependency - check for flush dependency sanity
2444 * @target_wq: workqueue being flushed
2445 * @target_work: work item being flushed (NULL for workqueue flushes)
2446 *
2447 * %current is trying to flush the whole @target_wq or @target_work on it.
2448 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2449 * reclaiming memory or running on a workqueue which doesn't have
2450 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2451 * a deadlock.
2452 */
2453 static void check_flush_dependency(struct workqueue_struct *target_wq,
2454 struct work_struct *target_work)
2455 {
2456 work_func_t target_func = target_work ? target_work->func : NULL;
2457 struct worker *worker;
2458
2459 if (target_wq->flags & WQ_MEM_RECLAIM)
2460 return;
2461
2462 worker = current_wq_worker();
2463
2464 WARN_ONCE(current->flags & PF_MEMALLOC,
2465 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2466 current->pid, current->comm, target_wq->name, target_func);
2467 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2468 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2469 "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2470 worker->current_pwq->wq->name, worker->current_func,
2471 target_wq->name, target_func);
2472 }
2473
2474 struct wq_barrier {
2475 struct work_struct work;
2476 struct completion done;
2477 struct task_struct *task; /* purely informational */
2478 };
2479
2480 static void wq_barrier_func(struct work_struct *work)
2481 {
2482 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2483 complete(&barr->done);
2484 }
2485
2486 /**
2487 * insert_wq_barrier - insert a barrier work
2488 * @pwq: pwq to insert barrier into
2489 * @barr: wq_barrier to insert
2490 * @target: target work to attach @barr to
2491 * @worker: worker currently executing @target, NULL if @target is not executing
2492 *
2493 * @barr is linked to @target such that @barr is completed only after
2494 * @target finishes execution. Please note that the ordering
2495 * guarantee is observed only with respect to @target and on the local
2496 * cpu.
2497 *
2498 * Currently, a queued barrier can't be canceled. This is because
2499 * try_to_grab_pending() can't determine whether the work to be
2500 * grabbed is at the head of the queue and thus can't clear LINKED
2501 * flag of the previous work while there must be a valid next work
2502 * after a work with LINKED flag set.
2503 *
2504 * Note that when @worker is non-NULL, @target may be modified
2505 * underneath us, so we can't reliably determine pwq from @target.
2506 *
2507 * CONTEXT:
2508 * spin_lock_irq(pool->lock).
2509 */
2510 static void insert_wq_barrier(struct pool_workqueue *pwq,
2511 struct wq_barrier *barr,
2512 struct work_struct *target, struct worker *worker)
2513 {
2514 struct list_head *head;
2515 unsigned int linked = 0;
2516
2517 /*
2518 * debugobject calls are safe here even with pool->lock locked
2519 * as we know for sure that this will not trigger any of the
2520 * checks and call back into the fixup functions where we
2521 * might deadlock.
2522 */
2523 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2524 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2525
2526 /*
2527 * Explicitly init the crosslock for wq_barrier::done, make its lock
2528 * key a subkey of the corresponding work. As a result we won't
2529 * build a dependency between wq_barrier::done and unrelated work.
2530 */
2531 lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
2532 "(complete)wq_barr::done",
2533 target->lockdep_map.key, 1);
2534 __init_completion(&barr->done);
2535 barr->task = current;
2536
2537 /*
2538 * If @target is currently being executed, schedule the
2539 * barrier to the worker; otherwise, put it after @target.
2540 */
2541 if (worker)
2542 head = worker->scheduled.next;
2543 else {
2544 unsigned long *bits = work_data_bits(target);
2545
2546 head = target->entry.next;
2547 /* there can already be other linked works, inherit and set */
2548 linked = *bits & WORK_STRUCT_LINKED;
2549 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2550 }
2551
2552 debug_work_activate(&barr->work);
2553 insert_work(pwq, &barr->work, head,
2554 work_color_to_flags(WORK_NO_COLOR) | linked);
2555 }
2556
2557 /**
2558 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2559 * @wq: workqueue being flushed
2560 * @flush_color: new flush color, < 0 for no-op
2561 * @work_color: new work color, < 0 for no-op
2562 *
2563 * Prepare pwqs for workqueue flushing.
2564 *
2565 * If @flush_color is non-negative, flush_color on all pwqs should be
2566 * -1. If no pwq has in-flight commands at the specified color, all
2567 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2568 * has in flight commands, its pwq->flush_color is set to
2569 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2570 * wakeup logic is armed and %true is returned.
2571 *
2572 * The caller should have initialized @wq->first_flusher prior to
2573 * calling this function with non-negative @flush_color. If
2574 * @flush_color is negative, no flush color update is done and %false
2575 * is returned.
2576 *
2577 * If @work_color is non-negative, all pwqs should have the same
2578 * work_color which is previous to @work_color and all will be
2579 * advanced to @work_color.
2580 *
2581 * CONTEXT:
2582 * mutex_lock(wq->mutex).
2583 *
2584 * Return:
2585 * %true if @flush_color >= 0 and there's something to flush. %false
2586 * otherwise.
2587 */
2588 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2589 int flush_color, int work_color)
2590 {
2591 bool wait = false;
2592 struct pool_workqueue *pwq;
2593
2594 if (flush_color >= 0) {
2595 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2596 atomic_set(&wq->nr_pwqs_to_flush, 1);
2597 }
2598
2599 for_each_pwq(pwq, wq) {
2600 struct worker_pool *pool = pwq->pool;
2601
2602 spin_lock_irq(&pool->lock);
2603
2604 if (flush_color >= 0) {
2605 WARN_ON_ONCE(pwq->flush_color != -1);
2606
2607 if (pwq->nr_in_flight[flush_color]) {
2608 pwq->flush_color = flush_color;
2609 atomic_inc(&wq->nr_pwqs_to_flush);
2610 wait = true;
2611 }
2612 }
2613
2614 if (work_color >= 0) {
2615 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2616 pwq->work_color = work_color;
2617 }
2618
2619 spin_unlock_irq(&pool->lock);
2620 }
2621
2622 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2623 complete(&wq->first_flusher->done);
2624
2625 return wait;
2626 }
2627
2628 /**
2629 * flush_workqueue - ensure that any scheduled work has run to completion.
2630 * @wq: workqueue to flush
2631 *
2632 * This function sleeps until all work items which were queued on entry
2633 * have finished execution, but it is not livelocked by new incoming ones.
2634 */
2635 void flush_workqueue(struct workqueue_struct *wq)
2636 {
2637 struct wq_flusher this_flusher = {
2638 .list = LIST_HEAD_INIT(this_flusher.list),
2639 .flush_color = -1,
2640 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2641 };
2642 int next_color;
2643
2644 if (WARN_ON(!wq_online))
2645 return;
2646
2647 lock_map_acquire(&wq->lockdep_map);
2648 lock_map_release(&wq->lockdep_map);
2649
2650 mutex_lock(&wq->mutex);
2651
2652 /*
2653 * Start-to-wait phase
2654 */
2655 next_color = work_next_color(wq->work_color);
2656
2657 if (next_color != wq->flush_color) {
2658 /*
2659 * Color space is not full. The current work_color
2660 * becomes our flush_color and work_color is advanced
2661 * by one.
2662 */
2663 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2664 this_flusher.flush_color = wq->work_color;
2665 wq->work_color = next_color;
2666
2667 if (!wq->first_flusher) {
2668 /* no flush in progress, become the first flusher */
2669 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2670
2671 wq->first_flusher = &this_flusher;
2672
2673 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2674 wq->work_color)) {
2675 /* nothing to flush, done */
2676 wq->flush_color = next_color;
2677 wq->first_flusher = NULL;
2678 goto out_unlock;
2679 }
2680 } else {
2681 /* wait in queue */
2682 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2683 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2684 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2685 }
2686 } else {
2687 /*
2688 * Oops, color space is full, wait on overflow queue.
2689 * The next flush completion will assign us
2690 * flush_color and transfer to flusher_queue.
2691 */
2692 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2693 }
2694
2695 check_flush_dependency(wq, NULL);
2696
2697 mutex_unlock(&wq->mutex);
2698
2699 wait_for_completion(&this_flusher.done);
2700
2701 /*
2702 * Wake-up-and-cascade phase
2703 *
2704 * First flushers are responsible for cascading flushes and
2705 * handling overflow. Non-first flushers can simply return.
2706 */
2707 if (wq->first_flusher != &this_flusher)
2708 return;
2709
2710 mutex_lock(&wq->mutex);
2711
2712 /* we might have raced, check again with mutex held */
2713 if (wq->first_flusher != &this_flusher)
2714 goto out_unlock;
2715
2716 wq->first_flusher = NULL;
2717
2718 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2719 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2720
2721 while (true) {
2722 struct wq_flusher *next, *tmp;
2723
2724 /* complete all the flushers sharing the current flush color */
2725 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2726 if (next->flush_color != wq->flush_color)
2727 break;
2728 list_del_init(&next->list);
2729 complete(&next->done);
2730 }
2731
2732 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2733 wq->flush_color != work_next_color(wq->work_color));
2734
2735 /* this flush_color is finished, advance by one */
2736 wq->flush_color = work_next_color(wq->flush_color);
2737
2738 /* one color has been freed, handle overflow queue */
2739 if (!list_empty(&wq->flusher_overflow)) {
2740 /*
2741 * Assign the same color to all overflowed
2742 * flushers, advance work_color and append to
2743 * flusher_queue. This is the start-to-wait
2744 * phase for these overflowed flushers.
2745 */
2746 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2747 tmp->flush_color = wq->work_color;
2748
2749 wq->work_color = work_next_color(wq->work_color);
2750
2751 list_splice_tail_init(&wq->flusher_overflow,
2752 &wq->flusher_queue);
2753 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2754 }
2755
2756 if (list_empty(&wq->flusher_queue)) {
2757 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2758 break;
2759 }
2760
2761 /*
2762 * Need to flush more colors. Make the next flusher
2763 * the new first flusher and arm pwqs.
2764 */
2765 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2766 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2767
2768 list_del_init(&next->list);
2769 wq->first_flusher = next;
2770
2771 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2772 break;
2773
2774 /*
2775 * Meh... this color is already done, clear first
2776 * flusher and repeat cascading.
2777 */
2778 wq->first_flusher = NULL;
2779 }
2780
2781 out_unlock:
2782 mutex_unlock(&wq->mutex);
2783 }
2784 EXPORT_SYMBOL(flush_workqueue);
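/*
 * A minimal sketch of the usual pattern around flush_workqueue(), assuming
 * a hypothetical my_wq and my_work:
 *
 *	queue_work(my_wq, &my_work);
 *
 *	flush_workqueue(my_wq);
 *
 * Once flush_workqueue() returns, my_work and everything else queued on
 * my_wq before the flush began has finished; items queued afterwards are
 * not waited for.
 */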
2785
2786 /**
2787 * drain_workqueue - drain a workqueue
2788 * @wq: workqueue to drain
2789 *
2790 * Wait until the workqueue becomes empty. While draining is in progress,
2791 * only chain queueing is allowed. IOW, only currently pending or running
2792 * work items on @wq can queue further work items on it. @wq is flushed
2793 * repeatedly until it becomes empty. The number of flushes is determined
2794 * by the depth of chaining and should be relatively small. Whine if it
2795 * takes too long.
2796 */
2797 void drain_workqueue(struct workqueue_struct *wq)
2798 {
2799 unsigned int flush_cnt = 0;
2800 struct pool_workqueue *pwq;
2801
2802 /*
2803 * __queue_work() needs to test whether there are drainers, is much
2804 * hotter than drain_workqueue(), and already looks at @wq->flags.
2805 * Use __WQ_DRAINING so that the queueing path doesn't have to check nr_drainers.
2806 */
2807 mutex_lock(&wq->mutex);
2808 if (!wq->nr_drainers++)
2809 wq->flags |= __WQ_DRAINING;
2810 mutex_unlock(&wq->mutex);
2811 reflush:
2812 flush_workqueue(wq);
2813
2814 mutex_lock(&wq->mutex);
2815
2816 for_each_pwq(pwq, wq) {
2817 bool drained;
2818
2819 spin_lock_irq(&pwq->pool->lock);
2820 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2821 spin_unlock_irq(&pwq->pool->lock);
2822
2823 if (drained)
2824 continue;
2825
2826 if (++flush_cnt == 10 ||
2827 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2828 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2829 wq->name, flush_cnt);
2830
2831 mutex_unlock(&wq->mutex);
2832 goto reflush;
2833 }
2834
2835 if (!--wq->nr_drainers)
2836 wq->flags &= ~__WQ_DRAINING;
2837 mutex_unlock(&wq->mutex);
2838 }
2839 EXPORT_SYMBOL_GPL(drain_workqueue);
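/*
 * A minimal sketch, assuming a hypothetical my_wq whose work items may
 * re-queue themselves:
 *
 *	drain_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 *
 * destroy_workqueue() drains internally as well; calling drain_workqueue()
 * directly is useful when the queue must become empty but should remain
 * usable afterwards.
 */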
2840
2841 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2842 {
2843 struct worker *worker = NULL;
2844 struct worker_pool *pool;
2845 struct pool_workqueue *pwq;
2846
2847 might_sleep();
2848
2849 local_irq_disable();
2850 pool = get_work_pool(work);
2851 if (!pool) {
2852 local_irq_enable();
2853 return false;
2854 }
2855
2856 spin_lock(&pool->lock);
2857 /* see the comment in try_to_grab_pending() with the same code */
2858 pwq = get_work_pwq(work);
2859 if (pwq) {
2860 if (unlikely(pwq->pool != pool))
2861 goto already_gone;
2862 } else {
2863 worker = find_worker_executing_work(pool, work);
2864 if (!worker)
2865 goto already_gone;
2866 pwq = worker->current_pwq;
2867 }
2868
2869 check_flush_dependency(pwq->wq, work);
2870
2871 insert_wq_barrier(pwq, barr, work, worker);
2872 spin_unlock_irq(&pool->lock);
2873
2874 /*
2875 * Force a lock recursion deadlock when using flush_work() inside a
2876 * single-threaded or rescuer equipped workqueue.
2877 *
2878 * For single threaded workqueues the deadlock happens when the work
2879 * is after the work issuing the flush_work(). For rescuer equipped
2880 * workqueues the deadlock happens when the rescuer stalls, blocking
2881 * forward progress.
2882 */
2883 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
2884 lock_map_acquire(&pwq->wq->lockdep_map);
2885 lock_map_release(&pwq->wq->lockdep_map);
2886 }
2887
2888 return true;
2889 already_gone:
2890 spin_unlock_irq(&pool->lock);
2891 return false;
2892 }
2893
2894 /**
2895 * flush_work - wait for a work to finish executing the last queueing instance
2896 * @work: the work to flush
2897 *
2898 * Wait until @work has finished execution. @work is guaranteed to be idle
2899 * on return if it hasn't been requeued since flush started.
2900 *
2901 * Return:
2902 * %true if flush_work() waited for the work to finish execution,
2903 * %false if it was already idle.
2904 */
2905 bool flush_work(struct work_struct *work)
2906 {
2907 struct wq_barrier barr;
2908
2909 if (WARN_ON(!wq_online))
2910 return false;
2911
2912 lock_map_acquire(&work->lockdep_map);
2913 lock_map_release(&work->lockdep_map);
2914
2915 if (start_flush_work(work, &barr)) {
2916 wait_for_completion(&barr.done);
2917 destroy_work_on_stack(&barr.work);
2918 return true;
2919 } else {
2920 return false;
2921 }
2922 }
2923 EXPORT_SYMBOL_GPL(flush_work);
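/*
 * A minimal sketch, assuming a hypothetical my_work that may be queued or
 * currently executing:
 *
 *	if (flush_work(&my_work))
 *		pr_debug("waited for my_work to finish\n");
 *
 * Only the last queueing instance is waited for; if my_work is requeued
 * after the flush has started, the new instance may still be pending or
 * running when flush_work() returns.
 */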
2924
2925 struct cwt_wait {
2926 wait_queue_entry_t wait;
2927 struct work_struct *work;
2928 };
2929
2930 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2931 {
2932 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2933
2934 if (cwait->work != key)
2935 return 0;
2936 return autoremove_wake_function(wait, mode, sync, key);
2937 }
2938
2939 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2940 {
2941 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2942 unsigned long flags;
2943 int ret;
2944
2945 do {
2946 ret = try_to_grab_pending(work, is_dwork, &flags);
2947 /*
2948 * If someone else is already canceling, wait for it to
2949 * finish. flush_work() doesn't work for PREEMPT_NONE
2950 * because we may get scheduled between @work's completion
2951 * and the other canceling task resuming and clearing
2952 * CANCELING - flush_work() will return false immediately
2953 * as @work is no longer busy, try_to_grab_pending() will
2954 * return -ENOENT as @work is still being canceled and the
2955 * other canceling task won't be able to clear CANCELING as
2956 * we're hogging the CPU.
2957 *
2958 * Let's wait for completion using a waitqueue. As this
2959 * may lead to the thundering herd problem, use a custom
2960 * wake function which matches @work along with exclusive
2961 * wait and wakeup.
2962 */
2963 if (unlikely(ret == -ENOENT)) {
2964 struct cwt_wait cwait;
2965
2966 init_wait(&cwait.wait);
2967 cwait.wait.func = cwt_wakefn;
2968 cwait.work = work;
2969
2970 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2971 TASK_UNINTERRUPTIBLE);
2972 if (work_is_canceling(work))
2973 schedule();
2974 finish_wait(&cancel_waitq, &cwait.wait);
2975 }
2976 } while (unlikely(ret < 0));
2977
2978 /* tell other tasks trying to grab @work to back off */
2979 mark_work_canceling(work);
2980 local_irq_restore(flags);
2981
2982 /*
2983 * This allows canceling during early boot. We know that @work
2984 * isn't executing.
2985 */
2986 if (wq_online)
2987 flush_work(work);
2988
2989 clear_work_data(work);
2990
2991 /*
2992 * Paired with prepare_to_wait() above so that either
2993 * waitqueue_active() is visible here or !work_is_canceling() is
2994 * visible there.
2995 */
2996 smp_mb();
2997 if (waitqueue_active(&cancel_waitq))
2998 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2999
3000 return ret;
3001 }
3002
3003 /**
3004 * cancel_work_sync - cancel a work and wait for it to finish
3005 * @work: the work to cancel
3006 *
3007 * Cancel @work and wait for its execution to finish. This function
3008 * can be used even if the work re-queues itself or migrates to
3009 * another workqueue. On return from this function, @work is
3010 * guaranteed to be not pending or executing on any CPU.
3011 *
3012 * cancel_work_sync(&delayed_work->work) must not be used for
3013 * delayed_work's. Use cancel_delayed_work_sync() instead.
3014 *
3015 * The caller must ensure that the workqueue on which @work was last
3016 * queued can't be destroyed before this function returns.
3017 *
3018 * Return:
3019 * %true if @work was pending, %false otherwise.
3020 */
3021 bool cancel_work_sync(struct work_struct *work)
3022 {
3023 return __cancel_work_timer(work, false);
3024 }
3025 EXPORT_SYMBOL_GPL(cancel_work_sync);
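/*
 * A typical teardown sequence, sketched with a hypothetical device object
 * that embeds a work item:
 *
 *	cancel_work_sync(&mydev->reset_work);
 *	kfree(mydev);
 *
 * On return the handler is neither pending nor running, so freeing the
 * containing object is safe provided nothing can requeue the work item
 * afterwards.
 */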
3026
3027 /**
3028 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3029 * @dwork: the delayed work to flush
3030 *
3031 * Delayed timer is cancelled and the pending work is queued for
3032 * immediate execution. Like flush_work(), this function only
3033 * considers the last queueing instance of @dwork.
3034 *
3035 * Return:
3036 * %true if flush_work() waited for the work to finish execution,
3037 * %false if it was already idle.
3038 */
3039 bool flush_delayed_work(struct delayed_work *dwork)
3040 {
3041 local_irq_disable();
3042 if (del_timer_sync(&dwork->timer))
3043 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3044 local_irq_enable();
3045 return flush_work(&dwork->work);
3046 }
3047 EXPORT_SYMBOL(flush_delayed_work);
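/*
 * A minimal sketch, assuming a hypothetical my_dwork: cut the remaining
 * delay short and wait for the handler to finish.
 *
 *	flush_delayed_work(&my_dwork);
 *
 * If the timer was still pending, the work is moved to the queue for
 * immediate execution before being flushed; if my_dwork was idle, the call
 * returns %false without waiting.
 */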
3048
3049 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3050 {
3051 unsigned long flags;
3052 int ret;
3053
3054 do {
3055 ret = try_to_grab_pending(work, is_dwork, &flags);
3056 } while (unlikely(ret == -EAGAIN));
3057
3058 if (unlikely(ret < 0))
3059 return false;
3060
3061 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3062 local_irq_restore(flags);
3063 return ret;
3064 }
3065
3066 /*
3067 * See cancel_delayed_work()
3068 */
3069 bool cancel_work(struct work_struct *work)
3070 {
3071 return __cancel_work(work, false);
3072 }
3073
3074 /**
3075 * cancel_delayed_work - cancel a delayed work
3076 * @dwork: delayed_work to cancel
3077 *
3078 * Kill off a pending delayed_work.
3079 *
3080 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3081 * pending.
3082 *
3083 * Note:
3084 * The work callback function may still be running on return, unless
3085 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3086 * use cancel_delayed_work_sync() to wait on it.
3087 *
3088 * This function is safe to call from any context including IRQ handler.
3089 */
3090 bool cancel_delayed_work(struct delayed_work *dwork)
3091 {
3092 return __cancel_work(&dwork->work, true);
3093 }
3094 EXPORT_SYMBOL(cancel_delayed_work);
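/*
 * A minimal sketch, assuming a hypothetical my_dwork used as a timeout that
 * gets disarmed when the awaited event arrives:
 *
 *	if (cancel_delayed_work(&my_dwork))
 *		pr_debug("timeout disarmed before firing\n");
 *
 * Because this variant does not wait, the handler may still be running on
 * return; use cancel_delayed_work_sync() when that matters.
 */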
3095
3096 /**
3097 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3098 * @dwork: the delayed work to cancel
3099 *
3100 * This is cancel_work_sync() for delayed works.
3101 *
3102 * Return:
3103 * %true if @dwork was pending, %false otherwise.
3104 */
3105 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3106 {
3107 return __cancel_work_timer(&dwork->work, true);
3108 }
3109 EXPORT_SYMBOL(cancel_delayed_work_sync);
3110
3111 /**
3112 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3113 * @func: the function to call
3114 *
3115 * schedule_on_each_cpu() executes @func on each online CPU using the
3116 * system workqueue and blocks until all CPUs have completed.
3117 * schedule_on_each_cpu() is very slow.
3118 *
3119 * Return:
3120 * 0 on success, -errno on failure.
3121 */
3122 int schedule_on_each_cpu(work_func_t func)
3123 {
3124 int cpu;
3125 struct work_struct __percpu *works;
3126
3127 works = alloc_percpu(struct work_struct);
3128 if (!works)
3129 return -ENOMEM;
3130
3131 get_online_cpus();
3132
3133 for_each_online_cpu(cpu) {
3134 struct work_struct *work = per_cpu_ptr(works, cpu);
3135
3136 INIT_WORK(work, func);
3137 schedule_work_on(cpu, work);
3138 }
3139
3140 for_each_online_cpu(cpu)
3141 flush_work(per_cpu_ptr(works, cpu));
3142
3143 put_online_cpus();
3144 free_percpu(works);
3145 return 0;
3146 }
3147
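/*
 * A minimal sketch, assuming a hypothetical per-cpu counter:
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void bump_counter(struct work_struct *work)
 *	{
 *		this_cpu_inc(my_counter);
 *	}
 *
 *	if (schedule_on_each_cpu(bump_counter))
 *		pr_warn("per-cpu bump failed\n");
 *
 * The call returns only after every online CPU has executed bump_counter(),
 * so it must not be used from atomic context.
 */
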
3148 /**
3149 * execute_in_process_context - reliably execute the routine with user context
3150 * @fn: the function to execute
3151 * @ew: guaranteed storage for the execute work structure (must
3152 * be available when the work executes)
3153 *
3154 * Executes the function immediately if process context is available,
3155 * otherwise schedules the function for delayed execution.
3156 *
3157 * Return: 0 - function was executed
3158 * 1 - function was scheduled for execution
3159 */
3160 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3161 {
3162 if (!in_interrupt()) {
3163 fn(&ew->work);
3164 return 0;
3165 }
3166
3167 INIT_WORK(&ew->work, fn);
3168 schedule_work(&ew->work);
3169
3170 return 1;
3171 }
3172 EXPORT_SYMBOL_GPL(execute_in_process_context);
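/*
 * A minimal sketch, assuming a hypothetical object that embeds the
 * execute_work storage it is released with:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 *
 * From process context the release runs immediately; from interrupt context
 * it is deferred to the system workqueue, which is why @ew must remain
 * valid until the handler has run.
 */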
3173
3174 /**
3175 * free_workqueue_attrs - free a workqueue_attrs
3176 * @attrs: workqueue_attrs to free
3177 *
3178 * Undo alloc_workqueue_attrs().
3179 */
3180 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3181 {
3182 if (attrs) {
3183 free_cpumask_var(attrs->cpumask);
3184 kfree(attrs);
3185 }
3186 }
3187
3188 /**
3189 * alloc_workqueue_attrs - allocate a workqueue_attrs
3190 * @gfp_mask: allocation mask to use
3191 *
3192 * Allocate a new workqueue_attrs, initialize with default settings and
3193 * return it.
3194 *
3195 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3196 */
3197 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3198 {
3199 struct workqueue_attrs *attrs;
3200
3201 attrs = kzalloc(sizeof(*attrs), gfp_mask);
3202 if (!attrs)
3203 goto fail;
3204 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3205 goto fail;
3206
3207 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3208 return attrs;
3209 fail:
3210 free_workqueue_attrs(attrs);
3211 return NULL;
3212 }
3213
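/*
 * A minimal sketch of how a caller typically uses a workqueue_attrs with an
 * unbound workqueue, assuming a hypothetical my_unbound_wq and the
 * apply_workqueue_attrs() interface declared in workqueue.h:
 *
 *	struct workqueue_attrs *attrs;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, cpumask_of_node(0));
 *	if (apply_workqueue_attrs(my_unbound_wq, attrs) < 0)
 *		pr_warn("failed to apply attrs\n");
 *	free_workqueue_attrs(attrs);
 */
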
3214 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3215 const struct workqueue_attrs *from)
3216 {
3217 to->nice = from->nice;
3218 cpumask_copy(to->cpumask, from->cpumask);
3219 /*
3220 * Unlike hash and equality test, this function doesn't ignore
3221 * ->no_numa as it is used for both pool and wq attrs. Instead,
3222 * get_unbound_pool() explicitly clears ->no_numa after copying.
3223 */
3224 to->no_numa = from->no_numa;
3225 }
3226
3227 /* hash value of the content of @attr */
3228 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3229 {
3230 u32 hash = 0;
3231
3232 hash = jhash_1word(attrs->nice, hash);
3233 hash = jhash(cpumask_bits(attrs->cpumask),
3234 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3235 return hash;
3236 }
3237
3238 /* content equality test */
3239 static bool wqattrs_equal(const struct workqueue_attrs *a,
3240 const struct workqueue_attrs *b)
3241 {
3242 if (a->nice != b->nice)
3243 return false;
3244 if (!cpumask_equal(a->cpumask, b->cpumask))
3245 return false;
3246 return true;
3247 }
3248
3249 /**
3250 * init_worker_pool - initialize a newly zalloc'd worker_pool
3251 * @pool: worker_pool to initialize
3252 *
3253 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3254 *
3255 * Return: 0 on success, -errno on failure. Even on failure, all fields
3256 * inside @pool proper are initialized and put_unbound_pool() can be called
3257 * on @pool safely to release it.
3258 */
3259 static int init_worker_pool(struct worker_pool *pool)
3260 {
3261 spin_lock_init(&pool->lock);
3262 pool->id = -1;
3263 pool->cpu = -1;
3264 pool->node = NUMA_NO_NODE;
3265 pool->flags |= POOL_DISASSOCIATED;
3266 pool->watchdog_ts = jiffies;
3267 INIT_LIST_HEAD(&pool->worklist);
3268 INIT_LIST_HEAD(&pool->idle_list);
3269 hash_init(pool->busy_hash);
3270
3271 setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout,
3272 (unsigned long)pool);
3273
3274 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3275 (unsigned long)pool);
3276
3277 mutex_init(&pool->attach_mutex);
3278 INIT_LIST_HEAD(&pool->workers);
3279
3280 ida_init(&pool->worker_ida);
3281 INIT_HLIST_NODE(&pool->hash_node);
3282 pool->refcnt = 1;
3283
3284 /* shouldn't fail above this point */
3285 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3286 if (!pool->attrs)
3287 return -ENOMEM;
3288 return 0;
3289 }
3290
3291 static void rcu_free_wq(struct rcu_head *rcu)
3292 {
3293 struct workqueue_struct *wq =
3294 container_of(rcu, struct workqueue_struct, rcu);
3295
3296 if (!(wq->flags & WQ_UNBOUND))
3297 free_percpu(wq->cpu_pwqs);
3298 else
3299 free_workqueue_attrs(wq->unbound_attrs);
3300
3301 kfree(wq->rescuer);
3302 kfree(wq);
3303 }
3304
3305 static void rcu_free_pool(struct rcu_head *rcu)
3306 {
3307 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3308
3309 ida_destroy(&pool->worker_ida);
3310 free_workqueue_attrs(pool->attrs);
3311 kfree(pool);
3312 }
3313
3314 /**
3315 * put_unbound_pool - put a worker_pool
3316 * @pool: worker_pool to put
3317 *
3318 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
3319 * safe manner. get_unbound_pool() calls this function on its failure path
3320 * and this function should be able to release pools which went through,
3321 * successfully or not, init_worker_pool().
3322 *
3323 * Should be called with wq_pool_mutex held.
3324 */
3325 static void put_unbound_pool(struct worker_pool *pool)
3326 {
3327 DECLARE_COMPLETION_ONSTACK(detach_completion);
3328 struct worker *worker;
3329
3330 lockdep_assert_held(&wq_pool_mutex);
3331
3332 if (--pool->refcnt)
3333 return;
3334
3335 /* sanity checks */
3336 if (WARN_ON(!(pool->cpu < 0)) ||
3337 WARN_ON(!list_empty(&pool->worklist)))
3338 return;
3339
3340 /* release id and unhash */
3341 if (pool->id >= 0)
3342 idr_remove(&worker_pool_idr, pool->id);
3343 hash_del(&pool->hash_node);
3344
3345 /*
3346 * Become the manager and destroy all workers. This prevents
3347 * @pool's workers from blocking on attach_mutex. We're the last
3348 * manager and @pool gets freed with the flag set.
3349 */
3350 spin_lock_irq(&pool->lock);
3351 wait_event_lock_irq(wq_manager_wait,
3352 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3353 pool->flags |= POOL_MANAGER_ACTIVE;
3354
3355 while ((worker = first_idle_worker(pool)))
3356 destroy_worker(worker);
3357 WARN_ON(pool->nr_workers || pool->nr_idle);
3358 spin_unlock_irq(&pool->lock);
3359
3360 mutex_lock(&pool->attach_mutex);
3361 if (!list_empty(&pool->workers))
3362 pool->detach_completion = &detach_completion;
3363 mutex_unlock(&pool->attach_mutex);
3364
3365 if (pool->detach_completion)
3366 wait_for_completion(pool->detach_completion);
3367
3368 /* shut down the timers */
3369 del_timer_sync(&pool->idle_timer);
3370 del_timer_sync(&pool->mayday_timer);
3371
3372 /* sched-RCU protected to allow dereferences from get_work_pool() */
3373 call_rcu_sched(&pool->rcu, rcu_free_pool);
3374 }
3375
3376 /**
3377 * get_unbound_pool - get a worker_pool with the specified attributes
3378 * @attrs: the attributes of the worker_pool to get
3379 *
3380 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3381 * reference count and return it. If there already is a matching
3382 * worker_pool, it will be used; otherwise, this function attempts to
3383 * create a new one.
3384 *
3385 * Should be called with wq_pool_mutex held.
3386 *
3387 * Return: On success, a worker_pool with the same attributes as @attrs.
3388 * On failure, %NULL.
3389 */
3390 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3391 {
3392 u32 hash = wqattrs_hash(attrs);
3393 struct worker_pool *pool;
3394 int node;
3395 int target_node = NUMA_NO_NODE;
3396
3397 lockdep_assert_held(&wq_pool_mutex);
3398
3399 /* do we already have a matching pool? */
3400 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3401 if (wqattrs_equal(pool->attrs, attrs)) {
3402 pool->refcnt++;
3403 return pool;
3404 }
3405 }
3406
3407 /* if cpumask is contained inside a NUMA node, we belong to that node */
3408 if (wq_numa_enabled) {
3409 for_each_node(node) {
3410 if (cpumask_subset(attrs->cpumask,
3411 wq_numa_possible_cpumask[node])) {
3412 target_node = node;
3413 break;
3414 }
3415 }
3416 }
3417
3418 /* nope, create a new one */
3419 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3420 if (!pool || init_worker_pool(pool) < 0)
3421 goto fail;
3422
3423 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3424 copy_workqueue_attrs(pool->attrs, attrs);
3425 pool->node = target_node;
3426
3427 /*
3428 * no_numa isn't a worker_pool attribute, always clear it. See
3429 * 'struct workqueue_attrs' comments for detail.
3430 */
3431 pool->attrs->no_numa = false;
3432
3433 if (worker_pool_assign_id(pool) < 0)
3434 goto fail;
3435
3436 /* create and start the initial worker */
3437 if (wq_online && !create_worker(pool))
3438 goto fail;
3439
3440 /* install */
3441 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3442
3443 return pool;
3444 fail:
3445 if (pool)
3446 put_unbound_pool(pool);
3447 return NULL;
3448 }
3449
3450 static void rcu_free_pwq(struct rcu_head *rcu)
3451 {
3452 kmem_cache_free(pwq_cache,
3453 container_of(rcu, struct pool_workqueue, rcu));
3454 }
3455
3456 /*
3457 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3458 * and needs to be destroyed.
3459 */
3460 static void pwq_unbound_release_workfn(struct work_struct *work)
3461 {
3462 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3463 unbound_release_work);
3464 struct workqueue_struct *wq = pwq->wq;
3465 struct worker_pool *pool = pwq->pool;
3466 bool is_last;
3467
3468 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3469 return;
3470
3471 mutex_lock(&wq->mutex);
3472 list_del_rcu(&pwq->pwqs_node);
3473 is_last = list_empty(&wq->pwqs);
3474 mutex_unlock(&wq->mutex);
3475
3476 mutex_lock(&wq_pool_mutex);
3477 put_unbound_pool(pool);
3478 mutex_unlock(&wq_pool_mutex);
3479
3480 call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3481
3482 /*
3483 * If we're the last pwq going away, @wq is already dead and no one
3484 * is gonna access it anymore. Schedule RCU free.
3485 */
3486 if (is_last)
3487 call_rcu_sched(&wq->rcu, rcu_free_wq);
3488 }
3489
3490 /**
3491 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3492 * @pwq: target pool_workqueue
3493 *
3494 * If @pwq isn't freezing, set @pwq->max_active to the associated
3495 * workqueue's saved_max_active and activate delayed work items
3496 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3497 */
3498 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3499 {
3500 struct workqueue_struct *wq = pwq->wq;
3501 bool freezable = wq->flags & WQ_FREEZABLE;
3502 unsigned long flags;
3503
3504 /* for @wq->saved_max_active */
3505 lockdep_assert_held(&wq->mutex);
3506
3507 /* fast exit for non-freezable wqs */
3508 if (!freezable && pwq->max_active == wq->saved_max_active)
3509 return;
3510
3511 /* this function can be called during early boot w/ irq disabled */
3512 spin_lock_irqsave(&pwq->pool->lock, flags);
3513
3514 /*
3515 * During [un]freezing, the caller is responsible for ensuring that
3516 * this function is called at least once after @workqueue_freezing
3517 * is updated and visible.
3518 */
3519 if (!freezable || !workqueue_freezing) {
3520 pwq->max_active = wq->saved_max_active;
3521
3522 while (!list_empty(&pwq->delayed_works) &&
3523 pwq->nr_active < pwq->max_active)
3524 pwq_activate_first_delayed(pwq);
3525
3526 /*
3527 * Need to kick a worker after thawing or when an unbound wq's
3528 * max_active is bumped. It's a slow path. Do it always.
3529 */
3530 wake_up_worker(pwq->pool);
3531 } else {
3532 pwq->max_active = 0;
3533 }
3534
3535 spin_unlock_irqrestore(&pwq->pool->lock, flags);
3536 }
3537
3538 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3539 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3540 struct worker_pool *pool)
3541 {
3542 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3543
3544 memset(pwq, 0, sizeof(*pwq));
3545
3546 pwq->pool = pool;
3547 pwq->wq = wq;
3548 pwq->flush_color = -1;
3549 pwq->refcnt = 1;
3550 INIT_LIST_HEAD(&pwq->delayed_works);
3551 INIT_LIST_HEAD(&pwq->pwqs_node);
3552 INIT_LIST_HEAD(&pwq->mayday_node);
3553 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3554 }
3555
3556 /* sync @pwq with the current state of its associated wq and link it */
3557 static void link_pwq(struct pool_workqueue *pwq)
3558 {
3559 struct workqueue_struct *wq = pwq->wq;
3560
3561 lockdep_assert_held(&wq->mutex);
3562
3563 /* may be called multiple times, ignore if already linked */
3564 if (!list_empty(&pwq->pwqs_node))
3565 return;
3566
3567 /* set the matching work_color */
3568 pwq->work_color = wq->work_color;
3569
3570 /* sync max_active to the current setting */
3571 pwq_adjust_max_active(pwq);
3572
3573 /* link in @pwq */
3574 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3575 }
3576
3577 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3578 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3579 const struct workqueue_attrs *attrs)
3580 {
3581 struct worker_pool *pool;
3582 struct pool_workqueue *pwq;
3583
3584 lockdep_assert_held(&wq_pool_mutex);
3585
3586 pool = get_unbound_pool(attrs);
3587 if (!pool)
3588 return NULL;
3589
3590 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3591 if (!pwq) {
3592 put_unbound_pool(pool);
3593 return NULL;
3594 }
3595
3596 init_pwq(pwq, wq, pool);
3597 return pwq;
3598 }
3599
3600 /**
3601 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3602 * @attrs: the wq_attrs of the default pwq of the target workqueue
3603 * @node: the target NUMA node
3604 * @cpu_going_down: if >= 0, the CPU to consider as offline
3605 * @cpumask: outarg, the resulting cpumask
3606 *
3607 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3608 * @cpu_going_down is >= 0, that cpu is considered offline during
3609 * calculation. The result is stored in @cpumask.
3610 *
3611 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3612 * enabled and @node has online CPUs requested by @attrs, the returned
3613 * cpumask is the intersection of the possible CPUs of @node and
3614 * @attrs->cpumask.
3615 *
3616 * The caller is responsible for ensuring that the cpumask of @node stays
3617 * stable.
3618 *
3619 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3620 * %false if equal.
3621 */
3622 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3623 int cpu_going_down, cpumask_t *cpumask)
3624 {
3625 if (!wq_numa_enabled || attrs->no_numa)
3626 goto use_dfl;
3627
3628 /* does @node have any online CPUs @attrs wants? */
3629 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3630 if (cpu_going_down >= 0)
3631 cpumask_clear_cpu(cpu_going_down, cpumask);
3632
3633 if (cpumask_empty(cpumask))
3634 goto use_dfl;
3635
3636 /* yeap, return possible CPUs in @node that @attrs wants */
3637 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3638
3639 if (cpumask_empty(cpumask)) {
3640 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3641 "possible intersect\n");
3642 return false;
3643 }
3644
3645 return !cpumask_equal(cpumask, attrs->cpumask);
3646
3647 use_dfl:
3648 cpumask_copy(cpumask, attrs->cpumask);
3649 return false;
3650 }
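
/*
 * Hypothetical worked example of the calculation above (all numbers are
 * made up for illustration): with @attrs->cpumask = 0-7, node 1 having
 * possible CPUs 4-7 of which only 4-5 are online, and @cpu_going_down = -1,
 * the online intersection {4,5} is non-empty, so @cpumask becomes the
 * possible intersection 4-7 and the function returns %true because it
 * differs from @attrs->cpumask.  Had the online intersection been empty,
 * @attrs->cpumask would have been copied verbatim and %false returned.
 */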
3651
3652 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3653 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3654 int node,
3655 struct pool_workqueue *pwq)
3656 {
3657 struct pool_workqueue *old_pwq;
3658
3659 lockdep_assert_held(&wq_pool_mutex);
3660 lockdep_assert_held(&wq->mutex);
3661
3662 /* link_pwq() can handle duplicate calls */
3663 link_pwq(pwq);
3664
3665 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3666 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3667 return old_pwq;
3668 }
3669
3670 /* context to store the prepared attrs & pwqs before applying */
3671 struct apply_wqattrs_ctx {
3672 struct workqueue_struct *wq; /* target workqueue */
3673 struct workqueue_attrs *attrs; /* attrs to apply */
3674 struct list_head list; /* queued for batching commit */
3675 struct pool_workqueue *dfl_pwq;
3676 struct pool_workqueue *pwq_tbl[];
3677 };
3678
3679 /* free the resources after success or abort */
3680 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3681 {
3682 if (ctx) {
3683 int node;
3684
3685 for_each_node(node)
3686 put_pwq_unlocked(ctx->pwq_tbl[node]);
3687 put_pwq_unlocked(ctx->dfl_pwq);
3688
3689 free_workqueue_attrs(ctx->attrs);
3690
3691 kfree(ctx);
3692 }
3693 }
3694
3695 /* allocate the attrs and pwqs for later installation */
3696 static struct apply_wqattrs_ctx *
3697 apply_wqattrs_prepare(struct workqueue_struct *wq,
3698 const struct workqueue_attrs *attrs)
3699 {
3700 struct apply_wqattrs_ctx *ctx;
3701 struct workqueue_attrs *new_attrs, *tmp_attrs;
3702 int node;
3703
3704 lockdep_assert_held(&wq_pool_mutex);
3705
3706 ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
3707 GFP_KERNEL);
3708
3709 new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3710 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3711 if (!ctx || !new_attrs || !tmp_attrs)
3712 goto out_free;
3713
3714 /*
3715 * Calculate the attrs of the default pwq.
3716 * If the user configured cpumask doesn't overlap with the
3717 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3718 */
3719 copy_workqueue_attrs(new_attrs, attrs);
3720 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3721 if (unlikely(cpumask_empty(new_attrs->cpumask)))
3722 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3723
3724 /*
3725 * We may create multiple pwqs with differing cpumasks. Make a
3726 * copy of @new_attrs which will be modified and used to obtain
3727 * pools.
3728 */
3729 copy_workqueue_attrs(tmp_attrs, new_attrs);
3730
3731 /*
3732 * If something goes wrong during CPU up/down, we'll fall back to
3733 * the default pwq covering whole @attrs->cpumask. Always create
3734 * it even if we don't use it immediately.
3735 */
3736 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3737 if (!ctx->dfl_pwq)
3738 goto out_free;
3739
3740 for_each_node(node) {
3741 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3742 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3743 if (!ctx->pwq_tbl[node])
3744 goto out_free;
3745 } else {
3746 ctx->dfl_pwq->refcnt++;
3747 ctx->pwq_tbl[node] = ctx->dfl_pwq;
3748 }
3749 }
3750
3751 /* save the user configured attrs and sanitize it. */
3752 copy_workqueue_attrs(new_attrs, attrs);
3753 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3754 ctx->attrs = new_attrs;
3755
3756 ctx->wq = wq;
3757 free_workqueue_attrs(tmp_attrs);
3758 return ctx;
3759
3760 out_free:
3761 free_workqueue_attrs(tmp_attrs);
3762 free_workqueue_attrs(new_attrs);
3763 apply_wqattrs_cleanup(ctx);
3764 return NULL;
3765 }
3766
3767 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3768 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3769 {
3770 int node;
3771
3772 /* all pwqs have been created successfully, let's install'em */
3773 mutex_lock(&ctx->wq->mutex);
3774
3775 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3776
3777 /* save the previous pwq and install the new one */
3778 for_each_node(node)
3779 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3780 ctx->pwq_tbl[node]);
3781
3782 /* @dfl_pwq might not have been used, ensure it's linked */
3783 link_pwq(ctx->dfl_pwq);
3784 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3785
3786 mutex_unlock(&ctx->wq->mutex);
3787 }
3788
3789 static void apply_wqattrs_lock(void)
3790 {
3791 /* CPUs should stay stable across pwq creations and installations */
3792 get_online_cpus();
3793 mutex_lock(&wq_pool_mutex);
3794 }
3795
3796 static void apply_wqattrs_unlock(void)
3797 {
3798 mutex_unlock(&wq_pool_mutex);
3799 put_online_cpus();
3800 }
3801
3802 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3803 const struct workqueue_attrs *attrs)
3804 {
3805 struct apply_wqattrs_ctx *ctx;
3806
3807 /* only unbound workqueues can change attributes */
3808 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3809 return -EINVAL;
3810
3811 /* creating multiple pwqs breaks ordering guarantee */
3812 if (!list_empty(&wq->pwqs)) {
3813 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3814 return -EINVAL;
3815
3816 wq->flags &= ~__WQ_ORDERED;
3817 }
3818
3819 ctx = apply_wqattrs_prepare(wq, attrs);
3820 if (!ctx)
3821 return -ENOMEM;
3822
3823 /* the ctx has been prepared successfully, let's commit it */
3824 apply_wqattrs_commit(ctx);
3825 apply_wqattrs_cleanup(ctx);
3826
3827 return 0;
3828 }
3829
3830 /**
3831 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3832 * @wq: the target workqueue
3833 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3834 *
3835 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3836 * machines, this function maps a separate pwq to each NUMA node with
3837 * possible CPUs in @attrs->cpumask so that work items are affine to the
3838 * NUMA node they were issued on. Older pwqs are released as in-flight work
3839 * items finish. Note that a work item which repeatedly requeues itself
3840 * back-to-back will stay on its current pwq.
3841 *
3842 * Performs GFP_KERNEL allocations.
3843 *
3844 * Return: 0 on success and -errno on failure.
3845 */
3846 int apply_workqueue_attrs(struct workqueue_struct *wq,
3847 const struct workqueue_attrs *attrs)
3848 {
3849 int ret;
3850
3851 apply_wqattrs_lock();
3852 ret = apply_workqueue_attrs_locked(wq, attrs);
3853 apply_wqattrs_unlock();
3854
3855 return ret;
3856 }
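
/*
 * Minimal usage sketch for apply_workqueue_attrs().  The caller, the
 * workqueue name and the chosen nice value are hypothetical and only
 * illustrate the expected calling convention:
 *
 *	struct workqueue_struct *my_wq;
 *	struct workqueue_attrs *attrs;
 *	int ret = -ENOMEM;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (my_wq && attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpu_possible_mask);
 *		ret = apply_workqueue_attrs(my_wq, attrs);
 *	}
 *	free_workqueue_attrs(attrs);
 *
 * The attrs are copied while applying, so the caller frees its own copy
 * regardless of the return value.
 */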
3857
3858 /**
3859 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3860 * @wq: the target workqueue
3861 * @cpu: the CPU coming up or going down
3862 * @online: whether @cpu is coming up or going down
3863 *
3864 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3865 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
3866 * @wq accordingly.
3867 *
3868 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3869 * falls back to @wq->dfl_pwq which may not be optimal but is always
3870 * correct.
3871 *
3872 * Note that when the last allowed CPU of a NUMA node goes offline for a
3873 * workqueue with a cpumask spanning multiple nodes, the workers which were
3874 * already executing the work items for the workqueue will lose their CPU
3875 * affinity and may execute on any CPU. This is similar to how per-cpu
3876 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
3877 * affinity, it's the user's responsibility to flush the work item from
3878 * CPU_DOWN_PREPARE.
3879 */
3880 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3881 bool online)
3882 {
3883 int node = cpu_to_node(cpu);
3884 int cpu_off = online ? -1 : cpu;
3885 struct pool_workqueue *old_pwq = NULL, *pwq;
3886 struct workqueue_attrs *target_attrs;
3887 cpumask_t *cpumask;
3888
3889 lockdep_assert_held(&wq_pool_mutex);
3890
3891 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
3892 wq->unbound_attrs->no_numa)
3893 return;
3894
3895 /*
3896 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3897 * Let's use a preallocated one. The following buf is protected by
3898 * CPU hotplug exclusion.
3899 */
3900 target_attrs = wq_update_unbound_numa_attrs_buf;
3901 cpumask = target_attrs->cpumask;
3902
3903 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3904 pwq = unbound_pwq_by_node(wq, node);
3905
3906 /*
3907 * Let's determine what needs to be done. If the target cpumask is
3908 * different from the default pwq's, we need to compare it to @pwq's
3909 * and create a new one if they don't match. If the target cpumask
3910 * equals the default pwq's, the default pwq should be used.
3911 */
3912 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
3913 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3914 return;
3915 } else {
3916 goto use_dfl_pwq;
3917 }
3918
3919 /* create a new pwq */
3920 pwq = alloc_unbound_pwq(wq, target_attrs);
3921 if (!pwq) {
3922 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3923 wq->name);
3924 goto use_dfl_pwq;
3925 }
3926
3927 /* Install the new pwq. */
3928 mutex_lock(&wq->mutex);
3929 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3930 goto out_unlock;
3931
3932 use_dfl_pwq:
3933 mutex_lock(&wq->mutex);
3934 spin_lock_irq(&wq->dfl_pwq->pool->lock);
3935 get_pwq(wq->dfl_pwq);
3936 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3937 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3938 out_unlock:
3939 mutex_unlock(&wq->mutex);
3940 put_pwq_unlocked(old_pwq);
3941 }
3942
3943 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
3944 {
3945 bool highpri = wq->flags & WQ_HIGHPRI;
3946 int cpu, ret;
3947
3948 if (!(wq->flags & WQ_UNBOUND)) {
3949 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3950 if (!wq->cpu_pwqs)
3951 return -ENOMEM;
3952
3953 for_each_possible_cpu(cpu) {
3954 struct pool_workqueue *pwq =
3955 per_cpu_ptr(wq->cpu_pwqs, cpu);
3956 struct worker_pool *cpu_pools =
3957 per_cpu(cpu_worker_pools, cpu);
3958
3959 init_pwq(pwq, wq, &cpu_pools[highpri]);
3960
3961 mutex_lock(&wq->mutex);
3962 link_pwq(pwq);
3963 mutex_unlock(&wq->mutex);
3964 }
3965 return 0;
3966 } else if (wq->flags & __WQ_ORDERED) {
3967 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3968 /* there should only be single pwq for ordering guarantee */
3969 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3970 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3971 "ordering guarantee broken for workqueue %s\n", wq->name);
3972 return ret;
3973 } else {
3974 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3975 }
3976 }
3977
3978 static int wq_clamp_max_active(int max_active, unsigned int flags,
3979 const char *name)
3980 {
3981 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3982
3983 if (max_active < 1 || max_active > lim)
3984 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3985 max_active, name, 1, lim);
3986
3987 return clamp_val(max_active, 1, lim);
3988 }
3989
3990 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3991 unsigned int flags,
3992 int max_active,
3993 struct lock_class_key *key,
3994 const char *lock_name, ...)
3995 {
3996 size_t tbl_size = 0;
3997 va_list args;
3998 struct workqueue_struct *wq;
3999 struct pool_workqueue *pwq;
4000
4001 /*
4002 * Unbound && max_active == 1 used to imply ordered, which is no
4003 * longer the case on NUMA machines due to per-node pools. While
4004 * alloc_ordered_workqueue() is the right way to create an ordered
4005 * workqueue, keep the previous behavior to avoid subtle breakages
4006 * on NUMA.
4007 */
4008 if ((flags & WQ_UNBOUND) && max_active == 1)
4009 flags |= __WQ_ORDERED;
4010
4011 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4012 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4013 flags |= WQ_UNBOUND;
4014
4015 /* allocate wq and format name */
4016 if (flags & WQ_UNBOUND)
4017 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4018
4019 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4020 if (!wq)
4021 return NULL;
4022
4023 if (flags & WQ_UNBOUND) {
4024 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
4025 if (!wq->unbound_attrs)
4026 goto err_free_wq;
4027 }
4028
4029 va_start(args, lock_name);
4030 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4031 va_end(args);
4032
4033 max_active = max_active ?: WQ_DFL_ACTIVE;
4034 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4035
4036 /* init wq */
4037 wq->flags = flags;
4038 wq->saved_max_active = max_active;
4039 mutex_init(&wq->mutex);
4040 atomic_set(&wq->nr_pwqs_to_flush, 0);
4041 INIT_LIST_HEAD(&wq->pwqs);
4042 INIT_LIST_HEAD(&wq->flusher_queue);
4043 INIT_LIST_HEAD(&wq->flusher_overflow);
4044 INIT_LIST_HEAD(&wq->maydays);
4045
4046 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
4047 INIT_LIST_HEAD(&wq->list);
4048
4049 if (alloc_and_link_pwqs(wq) < 0)
4050 goto err_free_wq;
4051
4052 /*
4053 * Workqueues which may be used during memory reclaim should
4054 * have a rescuer to guarantee forward progress.
4055 */
4056 if (flags & WQ_MEM_RECLAIM) {
4057 struct worker *rescuer;
4058
4059 rescuer = alloc_worker(NUMA_NO_NODE);
4060 if (!rescuer)
4061 goto err_destroy;
4062
4063 rescuer->rescue_wq = wq;
4064 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4065 wq->name);
4066 if (IS_ERR(rescuer->task)) {
4067 kfree(rescuer);
4068 goto err_destroy;
4069 }
4070
4071 wq->rescuer = rescuer;
4072 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4073 wake_up_process(rescuer->task);
4074 }
4075
4076 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4077 goto err_destroy;
4078
4079 /*
4080 * wq_pool_mutex protects global freeze state and workqueues list.
4081 * Grab it, adjust max_active and add the new @wq to workqueues
4082 * list.
4083 */
4084 mutex_lock(&wq_pool_mutex);
4085
4086 mutex_lock(&wq->mutex);
4087 for_each_pwq(pwq, wq)
4088 pwq_adjust_max_active(pwq);
4089 mutex_unlock(&wq->mutex);
4090
4091 list_add_tail_rcu(&wq->list, &workqueues);
4092
4093 mutex_unlock(&wq_pool_mutex);
4094
4095 return wq;
4096
4097 err_free_wq:
4098 free_workqueue_attrs(wq->unbound_attrs);
4099 kfree(wq);
4100 return NULL;
4101 err_destroy:
4102 destroy_workqueue(wq);
4103 return NULL;
4104 }
4105 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
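
/*
 * Callers normally reach __alloc_workqueue_key() through the
 * alloc_workqueue() / alloc_ordered_workqueue() wrappers.  A hedged
 * sketch (the workqueue name and flag combination are illustrative,
 * not prescriptive):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_driver_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 * Passing 0 for @max_active selects WQ_DFL_ACTIVE, and WQ_MEM_RECLAIM
 * requests the rescuer created above so forward progress is guaranteed
 * under memory pressure.
 */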
4106
4107 /**
4108 * destroy_workqueue - safely terminate a workqueue
4109 * @wq: target workqueue
4110 *
4111 * Safely destroy a workqueue. All work currently pending will be done first.
4112 */
4113 void destroy_workqueue(struct workqueue_struct *wq)
4114 {
4115 struct pool_workqueue *pwq;
4116 int node;
4117
4118 /*
4119 * Remove it from sysfs first so that sanity check failure doesn't
4120 * lead to sysfs name conflicts.
4121 */
4122 workqueue_sysfs_unregister(wq);
4123
4124 /* drain it before proceeding with destruction */
4125 drain_workqueue(wq);
4126
4127 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4128 if (wq->rescuer) {
4129 struct worker *rescuer = wq->rescuer;
4130
4131 /* this prevents new queueing */
4132 spin_lock_irq(&wq_mayday_lock);
4133 wq->rescuer = NULL;
4134 spin_unlock_irq(&wq_mayday_lock);
4135
4136 /* rescuer will empty maydays list before exiting */
4137 kthread_stop(rescuer->task);
4138 kfree(rescuer);
4139 }
4140
4141 /* sanity checks */
4142 mutex_lock(&wq->mutex);
4143 for_each_pwq(pwq, wq) {
4144 int i;
4145
4146 for (i = 0; i < WORK_NR_COLORS; i++) {
4147 if (WARN_ON(pwq->nr_in_flight[i])) {
4148 mutex_unlock(&wq->mutex);
4149 show_workqueue_state();
4150 return;
4151 }
4152 }
4153
4154 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4155 WARN_ON(pwq->nr_active) ||
4156 WARN_ON(!list_empty(&pwq->delayed_works))) {
4157 mutex_unlock(&wq->mutex);
4158 show_workqueue_state();
4159 return;
4160 }
4161 }
4162 mutex_unlock(&wq->mutex);
4163
4164 /*
4165 * wq list is used to freeze wq, remove from list after
4166 * flushing is complete in case freeze races us.
4167 */
4168 mutex_lock(&wq_pool_mutex);
4169 list_del_rcu(&wq->list);
4170 mutex_unlock(&wq_pool_mutex);
4171
4172 if (!(wq->flags & WQ_UNBOUND)) {
4173 /*
4174 * The base ref is never dropped on per-cpu pwqs. Directly
4175 * schedule RCU free.
4176 */
4177 call_rcu_sched(&wq->rcu, rcu_free_wq);
4178 } else {
4179 /*
4180 * We're the sole accessor of @wq at this point. Directly
4181 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4182 * @wq will be freed when the last pwq is released.
4183 */
4184 for_each_node(node) {
4185 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4186 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4187 put_pwq_unlocked(pwq);
4188 }
4189
4190 /*
4191 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4192 * put. Don't access it afterwards.
4193 */
4194 pwq = wq->dfl_pwq;
4195 wq->dfl_pwq = NULL;
4196 put_pwq_unlocked(pwq);
4197 }
4198 }
4199 EXPORT_SYMBOL_GPL(destroy_workqueue);
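
/*
 * Typical teardown sketch for a workqueue owner.  Which work items need
 * cancelling is caller specific; the names below are hypothetical:
 *
 *	cancel_work_sync(&my_work);
 *	cancel_delayed_work_sync(&my_dwork);
 *	destroy_workqueue(my_wq);
 *
 * destroy_workqueue() drains the queue itself, but callers usually cancel
 * self-requeueing work items first so that draining can terminate.
 */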
4200
4201 /**
4202 * workqueue_set_max_active - adjust max_active of a workqueue
4203 * @wq: target workqueue
4204 * @max_active: new max_active value.
4205 *
4206 * Set max_active of @wq to @max_active.
4207 *
4208 * CONTEXT:
4209 * Don't call from IRQ context.
4210 */
4211 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4212 {
4213 struct pool_workqueue *pwq;
4214
4215 /* disallow meddling with max_active for ordered workqueues */
4216 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4217 return;
4218
4219 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4220
4221 mutex_lock(&wq->mutex);
4222
4223 wq->flags &= ~__WQ_ORDERED;
4224 wq->saved_max_active = max_active;
4225
4226 for_each_pwq(pwq, wq)
4227 pwq_adjust_max_active(pwq);
4228
4229 mutex_unlock(&wq->mutex);
4230 }
4231 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
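
/*
 * Example of a hypothetical runtime tunable bumping concurrency of a
 * non-ordered workqueue.  The value is clamped by wq_clamp_max_active()
 * and takes effect as in-flight work items retire:
 *
 *	workqueue_set_max_active(my_wq, 16);
 */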
4232
4233 /**
4234 * current_work - retrieve %current task's work struct
4235 *
4236 * Determine if %current task is a workqueue worker and what it's working on.
4237 * Useful to find out the context that the %current task is running in.
4238 *
4239 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4240 */
4241 struct work_struct *current_work(void)
4242 {
4243 struct worker *worker = current_wq_worker();
4244
4245 return worker ? worker->current_work : NULL;
4246 }
4247 EXPORT_SYMBOL(current_work);
4248
4249 /**
4250 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4251 *
4252 * Determine whether %current is a workqueue rescuer. Can be used from
4253 * work functions to determine whether it's being run off the rescuer task.
4254 *
4255 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4256 */
4257 bool current_is_workqueue_rescuer(void)
4258 {
4259 struct worker *worker = current_wq_worker();
4260
4261 return worker && worker->rescue_wq;
4262 }
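
/*
 * Sketch of how a work function might use the two helpers above, e.g. to
 * sanity-check its context or to shed optional load when running off the
 * rescuer.  The callbacks named here are hypothetical:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		WARN_ON(current_work() != work);
 *
 *		if (current_is_workqueue_rescuer()) {
 *			my_do_minimal_progress();
 *			return;
 *		}
 *		my_do_full_processing();
 *	}
 */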
4263
4264 /**
4265 * workqueue_congested - test whether a workqueue is congested
4266 * @cpu: CPU in question
4267 * @wq: target workqueue
4268 *
4269 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4270 * no synchronization around this function and the test result is
4271 * unreliable and only useful as advisory hints or for debugging.
4272 *
4273 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4274 * Note that both per-cpu and unbound workqueues may be associated with
4275 * multiple pool_workqueues which have separate congested states. A
4276 * workqueue being congested on one CPU doesn't mean the workqueue is also
4277 * congested on other CPUs / NUMA nodes.
4278 *
4279 * Return:
4280 * %true if congested, %false otherwise.
4281 */
4282 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4283 {
4284 struct pool_workqueue *pwq;
4285 bool ret;
4286
4287 rcu_read_lock_sched();
4288
4289 if (cpu == WORK_CPU_UNBOUND)
4290 cpu = smp_processor_id();
4291
4292 if (!(wq->flags & WQ_UNBOUND))
4293 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4294 else
4295 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4296
4297 ret = !list_empty(&pwq->delayed_works);
4298 rcu_read_unlock_sched();
4299
4300 return ret;
4301 }
4302 EXPORT_SYMBOL_GPL(workqueue_congested);
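
/*
 * Advisory-use sketch: a producer might skip optional work when its
 * queue looks congested.  As documented above this is only a hint; the
 * workqueue and work item are hypothetical:
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &my_optional_work);
 */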
4303
4304 /**
4305 * work_busy - test whether a work is currently pending or running
4306 * @work: the work to be tested
4307 *
4308 * Test whether @work is currently pending or running. There is no
4309 * synchronization around this function and the test result is
4310 * unreliable and only useful as advisory hints or for debugging.
4311 *
4312 * Return:
4313 * OR'd bitmask of WORK_BUSY_* bits.
4314 */
4315 unsigned int work_busy(struct work_struct *work)
4316 {
4317 struct worker_pool *pool;
4318 unsigned long flags;
4319 unsigned int ret = 0;
4320
4321 if (work_pending(work))
4322 ret |= WORK_BUSY_PENDING;
4323
4324 local_irq_save(flags);
4325 pool = get_work_pool(work);
4326 if (pool) {
4327 spin_lock(&pool->lock);
4328 if (find_worker_executing_work(pool, work))
4329 ret |= WORK_BUSY_RUNNING;
4330 spin_unlock(&pool->lock);
4331 }
4332 local_irq_restore(flags);
4333
4334 return ret;
4335 }
4336 EXPORT_SYMBOL_GPL(work_busy);
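
/*
 * Debugging sketch using the advisory bitmask returned above (the work
 * item is hypothetical):
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	pr_debug("my_work: %spending %srunning\n",
 *		 busy & WORK_BUSY_PENDING ? "" : "not ",
 *		 busy & WORK_BUSY_RUNNING ? "" : "not ");
 */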
4337
4338 /**
4339 * set_worker_desc - set description for the current work item
4340 * @fmt: printf-style format string
4341 * @...: arguments for the format string
4342 *
4343 * This function can be called by a running work function to describe what
4344 * the work item is about. If the worker task gets dumped, this
4345 * information will be printed out together to help debugging. The
4346 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4347 */
4348 void set_worker_desc(const char *fmt, ...)
4349 {
4350 struct worker *worker = current_wq_worker();
4351 va_list args;
4352
4353 if (worker) {
4354 va_start(args, fmt);
4355 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4356 va_end(args);
4357 worker->desc_valid = true;
4358 }
4359 }
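
/*
 * Sketch of a work function annotating itself for dumps; the device
 * structure and description format are hypothetical.  The description
 * is reported by print_worker_info() below:
 *
 *	static void my_io_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, io_work);
 *
 *		set_worker_desc("my_io %s", dev_name(&dev->device));
 *		...
 *	}
 */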
4360
4361 /**
4362 * print_worker_info - print out worker information and description
4363 * @log_lvl: the log level to use when printing
4364 * @task: target task
4365 *
4366 * If @task is a worker and currently executing a work item, print out the
4367 * name of the workqueue being serviced and worker description set with
4368 * set_worker_desc() by the currently executing work item.
4369 *
4370 * This function can be safely called on any task as long as the
4371 * task_struct itself is accessible. While safe, this function isn't
4372 * synchronized and may print out mixed-up or garbled output of limited length.
4373 */
4374 void print_worker_info(const char *log_lvl, struct task_struct *task)
4375 {
4376 work_func_t *fn = NULL;
4377 char name[WQ_NAME_LEN] = { };
4378 char desc[WORKER_DESC_LEN] = { };
4379 struct pool_workqueue *pwq = NULL;
4380 struct workqueue_struct *wq = NULL;
4381 bool desc_valid = false;
4382 struct worker *worker;
4383
4384 if (!(task->flags & PF_WQ_WORKER))
4385 return;
4386
4387 /*
4388 * This function is called without any synchronization and @task
4389 * could be in any state. Be careful with dereferences.
4390 */
4391 worker = kthread_probe_data(task);
4392
4393 /*
4394 * Carefully copy the associated workqueue's workfn and name. Keep
4395 * the original last '\0' in case the original contains garbage.
4396 */
4397 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4398 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4399 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4400 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4401
4402 /* copy worker description */
4403 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4404 if (desc_valid)
4405 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4406
4407 if (fn || name[0] || desc[0]) {
4408 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4409 if (desc[0])
4410 pr_cont(" (%s)", desc);
4411 pr_cont("\n");
4412 }
4413 }
4414
4415 static void pr_cont_pool_info(struct worker_pool *pool)
4416 {
4417 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4418 if (pool->node != NUMA_NO_NODE)
4419 pr_cont(" node=%d", pool->node);
4420 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4421 }
4422
4423 static void pr_cont_work(bool comma, struct work_struct *work)
4424 {
4425 if (work->func == wq_barrier_func) {
4426 struct wq_barrier *barr;
4427
4428 barr = container_of(work, struct wq_barrier, work);
4429
4430 pr_cont("%s BAR(%d)", comma ? "," : "",
4431 task_pid_nr(barr->task));
4432 } else {
4433 pr_cont("%s %pf", comma ? "," : "", work->func);
4434 }
4435 }
4436
4437 static void show_pwq(struct pool_workqueue *pwq)
4438 {
4439 struct worker_pool *pool = pwq->pool;
4440 struct work_struct *work;
4441 struct worker *worker;
4442 bool has_in_flight = false, has_pending = false;
4443 int bkt;
4444
4445 pr_info(" pwq %d:", pool->id);
4446 pr_cont_pool_info(pool);
4447
4448 pr_cont(" active=%d/%d refcnt=%d%s\n",
4449 pwq->nr_active, pwq->max_active, pwq->refcnt,
4450 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4451
4452 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4453 if (worker->current_pwq == pwq) {
4454 has_in_flight = true;
4455 break;
4456 }
4457 }
4458 if (has_in_flight) {
4459 bool comma = false;
4460
4461 pr_info(" in-flight:");
4462 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4463 if (worker->current_pwq != pwq)
4464 continue;
4465
4466 pr_cont("%s %d%s:%pf", comma ? "," : "",
4467 task_pid_nr(worker->task),
4468 worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4469 worker->current_func);
4470 list_for_each_entry(work, &worker->scheduled, entry)
4471 pr_cont_work(false, work);
4472 comma = true;
4473 }
4474 pr_cont("\n");
4475 }
4476
4477 list_for_each_entry(work, &pool->worklist, entry) {
4478 if (get_work_pwq(work) == pwq) {
4479 has_pending = true;
4480 break;
4481 }
4482 }
4483 if (has_pending) {
4484 bool comma = false;
4485
4486 pr_info(" pending:");
4487 list_for_each_entry(work, &pool->worklist, entry) {
4488 if (get_work_pwq(work) != pwq)
4489 continue;
4490
4491 pr_cont_work(comma, work);
4492 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4493 }
4494 pr_cont("\n");
4495 }
4496
4497 if (!list_empty(&pwq->delayed_works)) {
4498 bool comma = false;
4499
4500 pr_info(" delayed:");
4501 list_for_each_entry(work, &pwq->delayed_works, entry) {
4502 pr_cont_work(comma, work);
4503 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4504 }
4505 pr_cont("\n");
4506 }
4507 }
4508
4509 /**
4510 * show_workqueue_state - dump workqueue state
4511 *
4512 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4513 * all busy workqueues and pools.
4514 */
4515 void show_workqueue_state(void)
4516 {
4517 struct workqueue_struct *wq;
4518 struct worker_pool *pool;
4519 unsigned long flags;
4520 int pi;
4521
4522 rcu_read_lock_sched();
4523
4524 pr_info("Showing busy workqueues and worker pools:\n");
4525
4526 list_for_each_entry_rcu(wq, &workqueues, list) {
4527 struct pool_workqueue *pwq;
4528 bool idle = true;
4529
4530 for_each_pwq(pwq, wq) {
4531 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4532 idle = false;
4533 break;
4534 }
4535 }
4536 if (idle)
4537 continue;
4538
4539 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4540
4541 for_each_pwq(pwq, wq) {
4542 spin_lock_irqsave(&pwq->pool->lock, flags);
4543 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4544 show_pwq(pwq);
4545 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4546 /*
4547 * We could be printing a lot from atomic context, e.g.
4548 * sysrq-t -> show_workqueue_state(). Avoid triggering
4549 * hard lockup.
4550 */
4551 touch_nmi_watchdog();
4552 }
4553 }
4554
4555 for_each_pool(pool, pi) {
4556 struct worker *worker;
4557 bool first = true;
4558
4559 spin_lock_irqsave(&pool->lock, flags);
4560 if (pool->nr_workers == pool->nr_idle)
4561 goto next_pool;
4562
4563 pr_info("pool %d:", pool->id);
4564 pr_cont_pool_info(pool);
4565 pr_cont(" hung=%us workers=%d",
4566 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4567 pool->nr_workers);
4568 if (pool->manager)
4569 pr_cont(" manager: %d",
4570 task_pid_nr(pool->manager->task));
4571 list_for_each_entry(worker, &pool->idle_list, entry) {
4572 pr_cont(" %s%d", first ? "idle: " : "",
4573 task_pid_nr(worker->task));
4574 first = false;
4575 }
4576 pr_cont("\n");
4577 next_pool:
4578 spin_unlock_irqrestore(&pool->lock, flags);
4579 /*
4580 * We could be printing a lot from atomic context, e.g.
4581 * sysrq-t -> show_workqueue_state(). Avoid triggering
4582 * hard lockup.
4583 */
4584 touch_nmi_watchdog();
4585 }
4586
4587 rcu_read_unlock_sched();
4588 }
4589
4590 /*
4591 * CPU hotplug.
4592 *
4593 * There are two challenges in supporting CPU hotplug. Firstly, there
4594 * are a lot of assumptions on strong associations among work, pwq and
4595 * pool which make migrating pending and scheduled works very
4596 * difficult to implement without impacting hot paths. Secondly,
4597 * worker pools serve a mix of short, long and very long running work items,
4598 * making blocked draining impractical.
4599 *
4600 * This is solved by allowing the pools to be disassociated from the CPU,
4601 * running as unbound ones, and allowing them to be reattached later if the
4602 * CPU comes back online.
4603 */
4604
4605 static void wq_unbind_fn(struct work_struct *work)
4606 {
4607 int cpu = smp_processor_id();
4608 struct worker_pool *pool;
4609 struct worker *worker;
4610
4611 for_each_cpu_worker_pool(pool, cpu) {
4612 mutex_lock(&pool->attach_mutex);
4613 spin_lock_irq(&pool->lock);
4614
4615 /*
4616 * We've blocked all attach/detach operations. Make all workers
4617 * unbound and set DISASSOCIATED. Before this, all workers
4618 * except for the ones which are still executing works from
4619 * before the last CPU down must be on the cpu. After
4620 * this, they may become diasporas.
4621 */
4622 for_each_pool_worker(worker, pool)
4623 worker->flags |= WORKER_UNBOUND;
4624
4625 pool->flags |= POOL_DISASSOCIATED;
4626
4627 spin_unlock_irq(&pool->lock);
4628 mutex_unlock(&pool->attach_mutex);
4629
4630 /*
4631 * Call schedule() so that we cross rq->lock and thus can
4632 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4633 * This is necessary as scheduler callbacks may be invoked
4634 * from other cpus.
4635 */
4636 schedule();
4637
4638 /*
4639 * Sched callbacks are disabled now. Zap nr_running.
4640 * After this, nr_running stays zero and need_more_worker()
4641 * and keep_working() are always true as long as the
4642 * worklist is not empty. This pool now behaves as an
4643 * unbound (in terms of concurrency management) pool which
4644 * are served by workers tied to the pool.
4645 */
4646 atomic_set(&pool->nr_running, 0);
4647
4648 /*
4649 * With concurrency management just turned off, a busy
4650 * worker blocking could lead to lengthy stalls. Kick off
4651 * unbound chain execution of currently pending work items.
4652 */
4653 spin_lock_irq(&pool->lock);
4654 wake_up_worker(pool);
4655 spin_unlock_irq(&pool->lock);
4656 }
4657 }
4658
4659 /**
4660 * rebind_workers - rebind all workers of a pool to the associated CPU
4661 * @pool: pool of interest
4662 *
4663 * @pool->cpu is coming online. Rebind all workers to the CPU.
4664 */
4665 static void rebind_workers(struct worker_pool *pool)
4666 {
4667 struct worker *worker;
4668
4669 lockdep_assert_held(&pool->attach_mutex);
4670
4671 /*
4672 * Restore CPU affinity of all workers. As all idle workers should
4673 * be on the run-queue of the associated CPU before any local
4674 * wake-ups for concurrency management happen, restore CPU affinity
4675 * of all workers first and then clear UNBOUND. As we're called
4676 * from CPU_ONLINE, the following shouldn't fail.
4677 */
4678 for_each_pool_worker(worker, pool)
4679 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4680 pool->attrs->cpumask) < 0);
4681
4682 spin_lock_irq(&pool->lock);
4683
4684 /*
4685 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4686 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
4687 * being reworked and this can go away in time.
4688 */
4689 if (!(pool->flags & POOL_DISASSOCIATED)) {
4690 spin_unlock_irq(&pool->lock);
4691 return;
4692 }
4693
4694 pool->flags &= ~POOL_DISASSOCIATED;
4695
4696 for_each_pool_worker(worker, pool) {
4697 unsigned int worker_flags = worker->flags;
4698
4699 /*
4700 * A bound idle worker should actually be on the runqueue
4701 * of the associated CPU for local wake-ups targeting it to
4702 * work. Kick all idle workers so that they migrate to the
4703 * associated CPU. Doing this in the same loop as
4704 * replacing UNBOUND with REBOUND is safe as no worker will
4705 * be bound before @pool->lock is released.
4706 */
4707 if (worker_flags & WORKER_IDLE)
4708 wake_up_process(worker->task);
4709
4710 /*
4711 * We want to clear UNBOUND but can't directly call
4712 * worker_clr_flags() or adjust nr_running. Atomically
4713 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4714 * @worker will clear REBOUND using worker_clr_flags() when
4715 * it initiates the next execution cycle thus restoring
4716 * concurrency management. Note that when or whether
4717 * @worker clears REBOUND doesn't affect correctness.
4718 *
4719 * ACCESS_ONCE() is necessary because @worker->flags may be
4720 * tested without holding any lock in
4721 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4722 * fail incorrectly leading to premature concurrency
4723 * management operations.
4724 */
4725 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4726 worker_flags |= WORKER_REBOUND;
4727 worker_flags &= ~WORKER_UNBOUND;
4728 ACCESS_ONCE(worker->flags) = worker_flags;
4729 }
4730
4731 spin_unlock_irq(&pool->lock);
4732 }
4733
4734 /**
4735 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4736 * @pool: unbound pool of interest
4737 * @cpu: the CPU which is coming up
4738 *
4739 * An unbound pool may end up with a cpumask which doesn't have any online
4740 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
4741 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4742 * online CPU before, cpus_allowed of all its workers should be restored.
4743 */
4744 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4745 {
4746 static cpumask_t cpumask;
4747 struct worker *worker;
4748
4749 lockdep_assert_held(&pool->attach_mutex);
4750
4751 /* is @cpu allowed for @pool? */
4752 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4753 return;
4754
4755 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4756
4757 /* as we're called from CPU_ONLINE, the following shouldn't fail */
4758 for_each_pool_worker(worker, pool)
4759 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4760 }
4761
4762 int workqueue_prepare_cpu(unsigned int cpu)
4763 {
4764 struct worker_pool *pool;
4765
4766 for_each_cpu_worker_pool(pool, cpu) {
4767 if (pool->nr_workers)
4768 continue;
4769 if (!create_worker(pool))
4770 return -ENOMEM;
4771 }
4772 return 0;
4773 }
4774
4775 int workqueue_online_cpu(unsigned int cpu)
4776 {
4777 struct worker_pool *pool;
4778 struct workqueue_struct *wq;
4779 int pi;
4780
4781 mutex_lock(&wq_pool_mutex);
4782
4783 for_each_pool(pool, pi) {
4784 mutex_lock(&pool->attach_mutex);
4785
4786 if (pool->cpu == cpu)
4787 rebind_workers(pool);
4788 else if (pool->cpu < 0)
4789 restore_unbound_workers_cpumask(pool, cpu);
4790
4791 mutex_unlock(&pool->attach_mutex);
4792 }
4793
4794 /* update NUMA affinity of unbound workqueues */
4795 list_for_each_entry(wq, &workqueues, list)
4796 wq_update_unbound_numa(wq, cpu, true);
4797
4798 mutex_unlock(&wq_pool_mutex);
4799 return 0;
4800 }
4801
4802 int workqueue_offline_cpu(unsigned int cpu)
4803 {
4804 struct work_struct unbind_work;
4805 struct workqueue_struct *wq;
4806
4807 /* unbinding per-cpu workers should happen on the local CPU */
4808 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4809 queue_work_on(cpu, system_highpri_wq, &unbind_work);
4810
4811 /* update NUMA affinity of unbound workqueues */
4812 mutex_lock(&wq_pool_mutex);
4813 list_for_each_entry(wq, &workqueues, list)
4814 wq_update_unbound_numa(wq, cpu, false);
4815 mutex_unlock(&wq_pool_mutex);
4816
4817 /* wait for per-cpu unbinding to finish */
4818 flush_work(&unbind_work);
4819 destroy_work_on_stack(&unbind_work);
4820 return 0;
4821 }
4822
4823 #ifdef CONFIG_SMP
4824
4825 struct work_for_cpu {
4826 struct work_struct work;
4827 long (*fn)(void *);
4828 void *arg;
4829 long ret;
4830 };
4831
4832 static void work_for_cpu_fn(struct work_struct *work)
4833 {
4834 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4835
4836 wfc->ret = wfc->fn(wfc->arg);
4837 }
4838
4839 /**
4840 * work_on_cpu - run a function in thread context on a particular cpu
4841 * @cpu: the cpu to run on
4842 * @fn: the function to run
4843 * @arg: the function arg
4844 *
4845 * It is up to the caller to ensure that the cpu doesn't go offline.
4846 * The caller must not hold any locks which would prevent @fn from completing.
4847 *
4848 * Return: The value @fn returns.
4849 */
4850 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4851 {
4852 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4853
4854 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4855 schedule_work_on(cpu, &wfc.work);
4856 flush_work(&wfc.work);
4857 destroy_work_on_stack(&wfc.work);
4858 return wfc.ret;
4859 }
4860 EXPORT_SYMBOL_GPL(work_on_cpu);
4861
4862 /**
4863 * work_on_cpu_safe - run a function in thread context on a particular cpu
4864 * @cpu: the cpu to run on
4865 * @fn: the function to run
4866 * @arg: the function argument
4867 *
4868 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
4869 * any locks which would prevent @fn from completing.
4870 *
4871 * Return: The value @fn returns.
4872 */
4873 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
4874 {
4875 long ret = -ENODEV;
4876
4877 get_online_cpus();
4878 if (cpu_online(cpu))
4879 ret = work_on_cpu(cpu, fn, arg);
4880 put_online_cpus();
4881 return ret;
4882 }
4883 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
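
/*
 * Usage sketch: run a short, sleepable function on a specific CPU and
 * collect its return value.  The probing function and its argument are
 * hypothetical:
 *
 *	static long read_my_percpu_state(void *arg)
 *	{
 *		return my_read_local_state(arg);
 *	}
 *
 *	ret = work_on_cpu_safe(target_cpu, read_my_percpu_state, &state);
 *
 * work_on_cpu_safe() returns -ENODEV if @target_cpu is offline, while
 * plain work_on_cpu() requires the caller to keep the CPU online itself.
 */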
4884 #endif /* CONFIG_SMP */
4885
4886 #ifdef CONFIG_FREEZER
4887
4888 /**
4889 * freeze_workqueues_begin - begin freezing workqueues
4890 *
4891 * Start freezing workqueues. After this function returns, all freezable
4892 * workqueues will queue new works to their delayed_works list instead of
4893 * pool->worklist.
4894 *
4895 * CONTEXT:
4896 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4897 */
4898 void freeze_workqueues_begin(void)
4899 {
4900 struct workqueue_struct *wq;
4901 struct pool_workqueue *pwq;
4902
4903 mutex_lock(&wq_pool_mutex);
4904
4905 WARN_ON_ONCE(workqueue_freezing);
4906 workqueue_freezing = true;
4907
4908 list_for_each_entry(wq, &workqueues, list) {
4909 mutex_lock(&wq->mutex);
4910 for_each_pwq(pwq, wq)
4911 pwq_adjust_max_active(pwq);
4912 mutex_unlock(&wq->mutex);
4913 }
4914
4915 mutex_unlock(&wq_pool_mutex);
4916 }
4917
4918 /**
4919 * freeze_workqueues_busy - are freezable workqueues still busy?
4920 *
4921 * Check whether freezing is complete. This function must be called
4922 * between freeze_workqueues_begin() and thaw_workqueues().
4923 *
4924 * CONTEXT:
4925 * Grabs and releases wq_pool_mutex.
4926 *
4927 * Return:
4928 * %true if some freezable workqueues are still busy. %false if freezing
4929 * is complete.
4930 */
4931 bool freeze_workqueues_busy(void)
4932 {
4933 bool busy = false;
4934 struct workqueue_struct *wq;
4935 struct pool_workqueue *pwq;
4936
4937 mutex_lock(&wq_pool_mutex);
4938
4939 WARN_ON_ONCE(!workqueue_freezing);
4940
4941 list_for_each_entry(wq, &workqueues, list) {
4942 if (!(wq->flags & WQ_FREEZABLE))
4943 continue;
4944 /*
4945 * nr_active is monotonically decreasing. It's safe
4946 * to peek without lock.
4947 */
4948 rcu_read_lock_sched();
4949 for_each_pwq(pwq, wq) {
4950 WARN_ON_ONCE(pwq->nr_active < 0);
4951 if (pwq->nr_active) {
4952 busy = true;
4953 rcu_read_unlock_sched();
4954 goto out_unlock;
4955 }
4956 }
4957 rcu_read_unlock_sched();
4958 }
4959 out_unlock:
4960 mutex_unlock(&wq_pool_mutex);
4961 return busy;
4962 }
4963
4964 /**
4965 * thaw_workqueues - thaw workqueues
4966 *
4967 * Thaw workqueues. Normal queueing is restored and all collected
4968 * frozen works are transferred to their respective pool worklists.
4969 *
4970 * CONTEXT:
4971 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4972 */
4973 void thaw_workqueues(void)
4974 {
4975 struct workqueue_struct *wq;
4976 struct pool_workqueue *pwq;
4977
4978 mutex_lock(&wq_pool_mutex);
4979
4980 if (!workqueue_freezing)
4981 goto out_unlock;
4982
4983 workqueue_freezing = false;
4984
4985 /* restore max_active and repopulate worklist */
4986 list_for_each_entry(wq, &workqueues, list) {
4987 mutex_lock(&wq->mutex);
4988 for_each_pwq(pwq, wq)
4989 pwq_adjust_max_active(pwq);
4990 mutex_unlock(&wq->mutex);
4991 }
4992
4993 out_unlock:
4994 mutex_unlock(&wq_pool_mutex);
4995 }
4996 #endif /* CONFIG_FREEZER */
4997
4998 static int workqueue_apply_unbound_cpumask(void)
4999 {
5000 LIST_HEAD(ctxs);
5001 int ret = 0;
5002 struct workqueue_struct *wq;
5003 struct apply_wqattrs_ctx *ctx, *n;
5004
5005 lockdep_assert_held(&wq_pool_mutex);
5006
5007 list_for_each_entry(wq, &workqueues, list) {
5008 if (!(wq->flags & WQ_UNBOUND))
5009 continue;
5010 /* creating multiple pwqs breaks ordering guarantee */
5011 if (wq->flags & __WQ_ORDERED)
5012 continue;
5013
5014 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5015 if (!ctx) {
5016 ret = -ENOMEM;
5017 break;
5018 }
5019
5020 list_add_tail(&ctx->list, &ctxs);
5021 }
5022
5023 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5024 if (!ret)
5025 apply_wqattrs_commit(ctx);
5026 apply_wqattrs_cleanup(ctx);
5027 }
5028
5029 return ret;
5030 }
5031
5032 /**
5033 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5034 * @cpumask: the cpumask to set
5035 *
5036 * The low-level workqueues cpumask is a global cpumask that limits
5037 * the affinity of all unbound workqueues. This function checks @cpumask,
5038 * applies it to all unbound workqueues and updates all their pwqs.
5039 *
5040 * Return: 0 - Success
5041 * -EINVAL - Invalid @cpumask
5042 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5043 */
5044 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5045 {
5046 int ret = -EINVAL;
5047 cpumask_var_t saved_cpumask;
5048
5049 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5050 return -ENOMEM;
5051
5052 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5053 if (!cpumask_empty(cpumask)) {
5054 apply_wqattrs_lock();
5055
5056 /* save the old wq_unbound_cpumask. */
5057 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5058
5059 /* update wq_unbound_cpumask at first and apply it to wqs. */
5060 cpumask_copy(wq_unbound_cpumask, cpumask);
5061 ret = workqueue_apply_unbound_cpumask();
5062
5063 /* restore the wq_unbound_cpumask when failed. */
5064 if (ret < 0)
5065 cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5066
5067 apply_wqattrs_unlock();
5068 }
5069
5070 free_cpumask_var(saved_cpumask);
5071 return ret;
5072 }
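
/*
 * Sketch of a caller restricting all unbound workqueues to a reserved
 * mask.  The housekeeping_cpus mask is hypothetical; from userspace the
 * sysfs "cpumask" attribute below is the usual way to drive this:
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, housekeeping_cpus);
 *	ret = workqueue_set_unbound_cpumask(mask);
 *	free_cpumask_var(mask);
 */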
5073
5074 #ifdef CONFIG_SYSFS
5075 /*
5076 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5077 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5078 * following attributes.
5079 *
5080 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5081 * max_active RW int : maximum number of in-flight work items
5082 *
5083 * Unbound workqueues have the following extra attributes.
5084 *
5085 * id RO int : the associated pool ID
5086 * nice RW int : nice value of the workers
5087 * cpumask RW mask : bitmask of allowed CPUs for the workers
5088 */
5089 struct wq_device {
5090 struct workqueue_struct *wq;
5091 struct device dev;
5092 };
5093
5094 static struct workqueue_struct *dev_to_wq(struct device *dev)
5095 {
5096 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5097
5098 return wq_dev->wq;
5099 }
5100
5101 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5102 char *buf)
5103 {
5104 struct workqueue_struct *wq = dev_to_wq(dev);
5105
5106 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5107 }
5108 static DEVICE_ATTR_RO(per_cpu);
5109
5110 static ssize_t max_active_show(struct device *dev,
5111 struct device_attribute *attr, char *buf)
5112 {
5113 struct workqueue_struct *wq = dev_to_wq(dev);
5114
5115 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5116 }
5117
max_active_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)5118 static ssize_t max_active_store(struct device *dev,
5119 struct device_attribute *attr, const char *buf,
5120 size_t count)
5121 {
5122 struct workqueue_struct *wq = dev_to_wq(dev);
5123 int val;
5124
5125 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5126 return -EINVAL;
5127
5128 workqueue_set_max_active(wq, val);
5129 return count;
5130 }
5131 static DEVICE_ATTR_RW(max_active);
5132
5133 static struct attribute *wq_sysfs_attrs[] = {
5134 &dev_attr_per_cpu.attr,
5135 &dev_attr_max_active.attr,
5136 NULL,
5137 };
5138 ATTRIBUTE_GROUPS(wq_sysfs);
5139
wq_pool_ids_show(struct device * dev,struct device_attribute * attr,char * buf)5140 static ssize_t wq_pool_ids_show(struct device *dev,
5141 struct device_attribute *attr, char *buf)
5142 {
5143 struct workqueue_struct *wq = dev_to_wq(dev);
5144 const char *delim = "";
5145 int node, written = 0;
5146
5147 rcu_read_lock_sched();
5148 for_each_node(node) {
5149 written += scnprintf(buf + written, PAGE_SIZE - written,
5150 "%s%d:%d", delim, node,
5151 unbound_pwq_by_node(wq, node)->pool->id);
5152 delim = " ";
5153 }
5154 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5155 rcu_read_unlock_sched();
5156
5157 return written;
5158 }
5159
wq_nice_show(struct device * dev,struct device_attribute * attr,char * buf)5160 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5161 char *buf)
5162 {
5163 struct workqueue_struct *wq = dev_to_wq(dev);
5164 int written;
5165
5166 mutex_lock(&wq->mutex);
5167 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5168 mutex_unlock(&wq->mutex);
5169
5170 return written;
5171 }
5172
/* prepare workqueue_attrs for sysfs store operations */
static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;

	lockdep_assert_held(&wq_pool_mutex);

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return NULL;

	copy_workqueue_attrs(attrs, wq->unbound_attrs);
	return attrs;
}

static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	apply_wqattrs_lock();

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
		ret = apply_workqueue_attrs_locked(wq, attrs);
	else
		ret = -EINVAL;

out_unlock:
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);
	return ret ?: count;
}

static ssize_t wq_cpumask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
			    cpumask_pr_args(wq->unbound_attrs->cpumask));
	mutex_unlock(&wq->mutex);
	return written;
}

static ssize_t wq_cpumask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	apply_wqattrs_lock();

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	ret = cpumask_parse(buf, attrs->cpumask);
	if (!ret)
		ret = apply_workqueue_attrs_locked(wq, attrs);

out_unlock:
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);
	return ret ?: count;
}

static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	written = scnprintf(buf, PAGE_SIZE, "%d\n",
			    !wq->unbound_attrs->no_numa);
	mutex_unlock(&wq->mutex);

	return written;
}

static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int v, ret = -ENOMEM;

	apply_wqattrs_lock();

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	ret = -EINVAL;
	if (sscanf(buf, "%d", &v) == 1) {
		attrs->no_numa = !v;
		ret = apply_workqueue_attrs_locked(wq, attrs);
	}

out_unlock:
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);
	return ret ?: count;
}

static struct device_attribute wq_sysfs_unbound_attrs[] = {
	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
	__ATTR_NULL,
};

static struct bus_type wq_subsys = {
	.name				= "workqueue",
	.dev_groups			= wq_sysfs_groups,
};

static ssize_t wq_unbound_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int written;

	mutex_lock(&wq_pool_mutex);
	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
			    cpumask_pr_args(wq_unbound_cpumask));
	mutex_unlock(&wq_pool_mutex);

	return written;
}

static ssize_t wq_unbound_cpumask_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	cpumask_var_t cpumask;
	int ret;

	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpumask_parse(buf, cpumask);
	if (!ret)
		ret = workqueue_set_unbound_cpumask(cpumask);

	free_cpumask_var(cpumask);
	return ret ? ret : count;
}

static struct device_attribute wq_sysfs_cpumask_attr =
	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
	       wq_unbound_cpumask_store);

static int __init wq_sysfs_init(void)
{
	int err;

	err = subsys_virtual_register(&wq_subsys, NULL);
	if (err)
		return err;

	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
}
core_initcall(wq_sysfs_init);

static void wq_device_release(struct device *dev)
{
	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);

	kfree(wq_dev);
}

/**
 * workqueue_sysfs_register - make a workqueue visible in sysfs
 * @wq: the workqueue to register
 *
 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
 * which is the preferred method.
 *
 * Workqueue user should use this function directly iff it wants to apply
 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
 * apply_workqueue_attrs() may race against userland updating the
 * attributes.
 *
 * Return: 0 on success, -errno on failure.
 */
int workqueue_sysfs_register(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev;
	int ret;

	/*
	 * Adjusting max_active or creating new pwqs by applying
	 * attributes breaks ordering guarantee.  Disallow exposing ordered
	 * workqueues.
	 */
	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
		return -EINVAL;

	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
	if (!wq_dev)
		return -ENOMEM;

	wq_dev->wq = wq;
	wq_dev->dev.bus = &wq_subsys;
	wq_dev->dev.release = wq_device_release;
	dev_set_name(&wq_dev->dev, "%s", wq->name);

	/*
	 * unbound_attrs are created separately.  Suppress uevent until
	 * everything is ready.
	 */
	dev_set_uevent_suppress(&wq_dev->dev, true);

	ret = device_register(&wq_dev->dev);
	if (ret) {
		put_device(&wq_dev->dev);
		wq->wq_dev = NULL;
		return ret;
	}

	if (wq->flags & WQ_UNBOUND) {
		struct device_attribute *attr;

		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
			ret = device_create_file(&wq_dev->dev, attr);
			if (ret) {
				device_unregister(&wq_dev->dev);
				wq->wq_dev = NULL;
				return ret;
			}
		}
	}

	dev_set_uevent_suppress(&wq_dev->dev, false);
	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
	return 0;
}
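
/*
 * Hedged usage sketch for the direct-registration path described above.
 * The names here ("my_wq", the nice value) are illustrative; the point is
 * only the ordering: allocate without WQ_SYSFS, apply the desired
 * attributes, and only then expose the workqueue so userland cannot race
 * against apply_workqueue_attrs().
 *
 *	struct workqueue_struct *wq;
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (!wq || !attrs)
 *		goto fail;
 *	attrs->nice = -5;
 *	ret = apply_workqueue_attrs(wq, attrs);
 *	if (!ret)
 *		ret = workqueue_sysfs_register(wq);
 */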

/**
 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
 * @wq: the workqueue to unregister
 *
 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
 */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev = wq->wq_dev;

	if (!wq->wq_dev)
		return;

	wq->wq_dev = NULL;
	device_unregister(&wq_dev->dev);
}
#else	/* CONFIG_SYSFS */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
#endif	/* CONFIG_SYSFS */

/*
 * Workqueue watchdog.
 *
 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
 * flush dependency, a concurrency managed work item which stays RUNNING
 * indefinitely.  Workqueue stalls can be very difficult to debug as the
 * usual warning mechanisms don't trigger and internal workqueue state is
 * largely opaque.
 *
 * Workqueue watchdog monitors all worker pools periodically and dumps
 * state if some pools failed to make forward progress for a while where
 * forward progress is defined as the first item on ->worklist changing.
 *
 * This mechanism is controlled through the kernel parameter
 * "workqueue.watchdog_thresh" which can be updated at runtime through the
 * corresponding sysfs parameter file.
 */
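
/*
 * Hedged configuration example: with this file built in, the threshold
 * can be set at boot with "workqueue.watchdog_thresh=60" (seconds), or
 * changed at runtime by writing to
 * /sys/module/workqueue/parameters/watchdog_thresh (the path assumes the
 * default built-in module namespace).  Writing 0 disables the watchdog.
 */
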
#ifdef CONFIG_WQ_WATCHDOG

static void wq_watchdog_timer_fn(unsigned long data);

static unsigned long wq_watchdog_thresh = 30;
static struct timer_list wq_watchdog_timer =
	TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);

static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;

static void wq_watchdog_reset_touched(void)
{
	int cpu;

	wq_watchdog_touched = jiffies;
	for_each_possible_cpu(cpu)
		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
}

static void wq_watchdog_timer_fn(unsigned long data)
{
	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
	bool lockup_detected = false;
	struct worker_pool *pool;
	int pi;

	if (!thresh)
		return;

	rcu_read_lock();

	for_each_pool(pool, pi) {
		unsigned long pool_ts, touched, ts;

		if (list_empty(&pool->worklist))
			continue;

		/* get the latest of pool and touched timestamps */
		pool_ts = READ_ONCE(pool->watchdog_ts);
		touched = READ_ONCE(wq_watchdog_touched);

		if (time_after(pool_ts, touched))
			ts = pool_ts;
		else
			ts = touched;

		if (pool->cpu >= 0) {
			unsigned long cpu_touched =
				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
						  pool->cpu));
			if (time_after(cpu_touched, ts))
				ts = cpu_touched;
		}

		/* did we stall? */
		if (time_after(jiffies, ts + thresh)) {
			lockup_detected = true;
			pr_emerg("BUG: workqueue lockup - pool");
			pr_cont_pool_info(pool);
			pr_cont(" stuck for %us!\n",
				jiffies_to_msecs(jiffies - pool_ts) / 1000);
		}
	}

	rcu_read_unlock();

	if (lockup_detected)
		show_workqueue_state();

	wq_watchdog_reset_touched();
	mod_timer(&wq_watchdog_timer, jiffies + thresh);
}

notrace void wq_watchdog_touch(int cpu)
{
	if (cpu >= 0)
		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
	else
		wq_watchdog_touched = jiffies;
}

static void wq_watchdog_set_thresh(unsigned long thresh)
{
	wq_watchdog_thresh = 0;
	del_timer_sync(&wq_watchdog_timer);

	if (thresh) {
		wq_watchdog_thresh = thresh;
		wq_watchdog_reset_touched();
		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
	}
}

static int wq_watchdog_param_set_thresh(const char *val,
					const struct kernel_param *kp)
{
	unsigned long thresh;
	int ret;

	ret = kstrtoul(val, 0, &thresh);
	if (ret)
		return ret;

	if (system_wq)
		wq_watchdog_set_thresh(thresh);
	else
		wq_watchdog_thresh = thresh;

	return 0;
}

static const struct kernel_param_ops wq_watchdog_thresh_ops = {
	.set	= wq_watchdog_param_set_thresh,
	.get	= param_get_ulong,
};

module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
		0644);

static void wq_watchdog_init(void)
{
	wq_watchdog_set_thresh(wq_watchdog_thresh);
}

#else	/* CONFIG_WQ_WATCHDOG */

static inline void wq_watchdog_init(void) { }

#endif	/* CONFIG_WQ_WATCHDOG */

static void __init wq_numa_init(void)
{
	cpumask_var_t *tbl;
	int node, cpu;

	if (num_possible_nodes() <= 1)
		return;

	if (wq_disable_numa) {
		pr_info("workqueue: NUMA affinity support disabled\n");
		return;
	}

	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
	BUG_ON(!wq_update_unbound_numa_attrs_buf);

	/*
	 * We want masks of possible CPUs of each node which isn't readily
	 * available.  Build one from cpu_to_node() which should have been
	 * fully initialized by now.
	 */
	tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
	BUG_ON(!tbl);

	for_each_node(node)
		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
				node_online(node) ? node : NUMA_NO_NODE));

	for_each_possible_cpu(cpu) {
		node = cpu_to_node(cpu);
		if (WARN_ON(node == NUMA_NO_NODE)) {
			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
			/* happens iff arch is bonkers, let's just proceed */
			return;
		}
		cpumask_set_cpu(cpu, tbl[node]);
	}

	wq_numa_possible_cpumask = tbl;
	wq_numa_enabled = true;
}

/**
 * workqueue_init_early - early init for workqueue subsystem
 *
 * This is the first half of two-staged workqueue subsystem initialization
 * and invoked as soon as the bare basics - memory allocation, cpumasks and
 * idr are up.  It sets up all the data structures and system workqueues
 * and allows early boot code to create workqueues and queue/cancel work
 * items.  Actual work item execution starts only after kthreads can be
 * created and scheduled right before early initcalls.
 */
int __init workqueue_init_early(void)
{
	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
	int i, cpu;

	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));

	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);

	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);

	/* initialize CPU pools */
	for_each_possible_cpu(cpu) {
		struct worker_pool *pool;

		i = 0;
		for_each_cpu_worker_pool(pool, cpu) {
			BUG_ON(init_worker_pool(pool));
			pool->cpu = cpu;
			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
			pool->attrs->nice = std_nice[i++];
			pool->node = cpu_to_node(cpu);

			/* alloc pool ID */
			mutex_lock(&wq_pool_mutex);
			BUG_ON(worker_pool_assign_id(pool));
			mutex_unlock(&wq_pool_mutex);
		}
	}

	/* create default unbound and ordered wq attrs */
	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
		struct workqueue_attrs *attrs;

		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
		attrs->nice = std_nice[i];
		unbound_std_wq_attrs[i] = attrs;

		/*
		 * An ordered wq should have only one pwq as ordering is
		 * guaranteed by max_active which is enforced by pwqs.
		 * Turn off NUMA so that dfl_pwq is used for all nodes.
		 */
		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
		attrs->nice = std_nice[i];
		attrs->no_numa = true;
		ordered_wq_attrs[i] = attrs;
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
					      WQ_POWER_EFFICIENT, 0);
	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
					      0);
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_power_efficient_wq ||
	       !system_freezable_power_efficient_wq);

	return 0;
}
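
/*
 * Hedged sketch of what "queue now, execute later" means in practice for
 * early boot code.  Anything that runs between workqueue_init_early() and
 * workqueue_init() may queue work, but no kworker will run it yet; the
 * item simply sits on the pool's worklist until the second stage creates
 * the initial workers.  my_early_work/my_early_fn are made-up names.
 *
 *	static void my_early_fn(struct work_struct *work)
 *	{
 *		pr_info("runs only after workqueue_init()\n");
 *	}
 *	static DECLARE_WORK(my_early_work, my_early_fn);
 *
 *	...then, somewhere after workqueue_init_early():
 *
 *	schedule_work(&my_early_work);
 */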

/**
 * workqueue_init - bring workqueue subsystem fully online
 *
 * This is the latter half of two-staged workqueue subsystem initialization
 * and invoked as soon as kthreads can be created and scheduled.
 * Workqueues have been created and work items queued on them, but there
 * are no kworkers executing the work items yet.  Populate the worker pools
 * with the initial workers and enable future kworker creations.
 */
int __init workqueue_init(void)
{
	struct workqueue_struct *wq;
	struct worker_pool *pool;
	int cpu, bkt;

	/*
	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
	 * CPU to node mapping may not be available that early on some
	 * archs such as power and arm64.  As per-cpu pools created
	 * previously could be missing node hint and unbound pools NUMA
	 * affinity, fix them up.
	 */
	wq_numa_init();

	mutex_lock(&wq_pool_mutex);

	for_each_possible_cpu(cpu) {
		for_each_cpu_worker_pool(pool, cpu) {
			pool->node = cpu_to_node(cpu);
		}
	}

	list_for_each_entry(wq, &workqueues, list)
		wq_update_unbound_numa(wq, smp_processor_id(), true);

	mutex_unlock(&wq_pool_mutex);

	/* create the initial workers */
	for_each_online_cpu(cpu) {
		for_each_cpu_worker_pool(pool, cpu) {
			pool->flags &= ~POOL_DISASSOCIATED;
			BUG_ON(!create_worker(pool));
		}
	}

	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
		BUG_ON(!create_worker(pool));

	wq_online = true;
	wq_watchdog_init();

	return 0;
}
