1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/workqueue.c - generic async execution with shared worker pool
4  *
5  * Copyright (C) 2002		Ingo Molnar
6  *
7  *   Derived from the taskqueue/keventd code by:
8  *     David Woodhouse <dwmw2@infradead.org>
9  *     Andrew Morton
10  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
11  *     Theodore Ts'o <tytso@mit.edu>
12  *
13  * Made to use alloc_percpu by Christoph Lameter.
14  *
15  * Copyright (C) 2010		SUSE Linux Products GmbH
16  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
17  *
18  * This is the generic async execution mechanism.  Work items are
19  * executed in process context.  The worker pool is shared and
20  * automatically managed.  There are two worker pools for each CPU (one for
21  * normal work items and the other for high priority ones) and some extra
22  * pools for workqueues which are not bound to any specific CPU - the
23  * number of these backing pools is dynamic.
24  *
25  * Please read Documentation/core-api/workqueue.rst for details.
26  */
27 
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/nmi.h>
53 #include <linux/kvm_para.h>
54 
55 #include "workqueue_internal.h"
56 
57 enum {
58 	/*
59 	 * worker_pool flags
60 	 *
61 	 * A bound pool is either associated or disassociated with its CPU.
62 	 * While associated (!DISASSOCIATED), all workers are bound to the
63 	 * CPU and none has %WORKER_UNBOUND set and concurrency management
64 	 * is in effect.
65 	 *
66 	 * While DISASSOCIATED, the cpu may be offline and all workers have
67 	 * %WORKER_UNBOUND set and concurrency management disabled, and may
68 	 * be executing on any CPU.  The pool behaves as an unbound one.
69 	 *
70 	 * Note that DISASSOCIATED should be flipped only while holding
71 	 * wq_pool_attach_mutex to avoid changing binding state while
72 	 * worker_attach_to_pool() is in progress.
73 	 */
74 	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
75 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
76 
77 	/* worker flags */
78 	WORKER_DIE		= 1 << 1,	/* die die die */
79 	WORKER_IDLE		= 1 << 2,	/* is idle */
80 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
81 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
82 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
83 	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */
84 
85 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
86 				  WORKER_UNBOUND | WORKER_REBOUND,
87 
88 	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
89 
90 	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
91 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
92 
93 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
94 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
95 
96 	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
97 						/* call for help after 10ms
98 						   (min two ticks) */
99 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
100 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
101 
102 	/*
103 	 * Rescue workers are used only in emergencies and are shared by
104 	 * all cpus.  Give MIN_NICE.
105 	 */
106 	RESCUER_NICE_LEVEL	= MIN_NICE,
107 	HIGHPRI_NICE_LEVEL	= MIN_NICE,
108 
109 	WQ_NAME_LEN		= 24,
110 };
111 
112 /*
113  * Structure fields follow one of the following exclusion rules.
114  *
115  * I: Modifiable by initialization/destruction paths and read-only for
116  *    everyone else.
117  *
118  * P: Preemption protected.  Disabling preemption is enough and should
119  *    only be modified and accessed from the local cpu.
120  *
121  * L: pool->lock protected.  Access with pool->lock held.
122  *
123  * X: During normal operation, modification requires pool->lock and should
124  *    be done only from local cpu.  Either disabling preemption on local
125  *    cpu or grabbing pool->lock is enough for read access.  If
126  *    POOL_DISASSOCIATED is set, it's identical to L.
127  *
128  * A: wq_pool_attach_mutex protected.
129  *
130  * PL: wq_pool_mutex protected.
131  *
132  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
133  *
134  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
135  *
136  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
137  *      RCU for reads.
138  *
139  * WQ: wq->mutex protected.
140  *
141  * WR: wq->mutex protected for writes.  RCU protected for reads.
142  *
143  * MD: wq_mayday_lock protected.
144  */
145 
146 /* struct worker is defined in workqueue_internal.h */
147 
148 struct worker_pool {
149 	raw_spinlock_t		lock;		/* the pool lock */
150 	int			cpu;		/* I: the associated cpu */
151 	int			node;		/* I: the associated node ID */
152 	int			id;		/* I: pool ID */
153 	unsigned int		flags;		/* X: flags */
154 
155 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
156 
157 	struct list_head	worklist;	/* L: list of pending works */
158 
159 	int			nr_workers;	/* L: total number of workers */
160 	int			nr_idle;	/* L: currently idle workers */
161 
162 	struct list_head	idle_list;	/* X: list of idle workers */
163 	struct timer_list	idle_timer;	/* L: worker idle timeout */
164 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
165 
166 	/* a worker is either on busy_hash or idle_list, or is the manager */
167 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
168 						/* L: hash of busy workers */
169 
170 	struct worker		*manager;	/* L: purely informational */
171 	struct list_head	workers;	/* A: attached workers */
172 	struct completion	*detach_completion; /* all workers detached */
173 
174 	struct ida		worker_ida;	/* worker IDs for task name */
175 
176 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
177 	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
178 	int			refcnt;		/* PL: refcnt for unbound pools */
179 
180 	/*
181 	 * The current concurrency level.  As it's likely to be accessed
182 	 * from other CPUs during try_to_wake_up(), put it in a separate
183 	 * cacheline.
184 	 */
185 	atomic_t		nr_running ____cacheline_aligned_in_smp;
186 
187 	/*
188 	 * Destruction of pool is RCU protected to allow dereferences
189 	 * from get_work_pool().
190 	 */
191 	struct rcu_head		rcu;
192 } ____cacheline_aligned_in_smp;
193 
194 /*
195  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
196  * of work_struct->data are used for flags and the remaining high bits
197  * point to the pwq; thus, pwqs need to be aligned at two's power of the
198  * number of flag bits.
199  */
200 struct pool_workqueue {
201 	struct worker_pool	*pool;		/* I: the associated pool */
202 	struct workqueue_struct *wq;		/* I: the owning workqueue */
203 	int			work_color;	/* L: current color */
204 	int			flush_color;	/* L: flushing color */
205 	int			refcnt;		/* L: reference count */
206 	int			nr_in_flight[WORK_NR_COLORS];
207 						/* L: nr of in_flight works */
208 	int			nr_active;	/* L: nr of active works */
209 	int			max_active;	/* L: max active works */
210 	struct list_head	inactive_works;	/* L: inactive works */
211 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
212 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
213 
214 	/*
215 	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
216 	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
217 	 * itself is also RCU protected so that the first pwq can be
218 	 * determined without grabbing wq->mutex.
219 	 */
220 	struct work_struct	unbound_release_work;
221 	struct rcu_head		rcu;
222 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
223 
224 /*
225  * Structure used to wait for workqueue flush.
226  */
227 struct wq_flusher {
228 	struct list_head	list;		/* WQ: list of flushers */
229 	int			flush_color;	/* WQ: flush color waiting for */
230 	struct completion	done;		/* flush completion */
231 };
232 
233 struct wq_device;
234 
235 /*
236  * The externally visible workqueue.  It relays the issued work items to
237  * the appropriate worker_pool through its pool_workqueues.
238  */
239 struct workqueue_struct {
240 	struct list_head	pwqs;		/* WR: all pwqs of this wq */
241 	struct list_head	list;		/* PR: list of all workqueues */
242 
243 	struct mutex		mutex;		/* protects this wq */
244 	int			work_color;	/* WQ: current work color */
245 	int			flush_color;	/* WQ: current flush color */
246 	atomic_t		nr_pwqs_to_flush; /* flush in progress */
247 	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
248 	struct list_head	flusher_queue;	/* WQ: flush waiters */
249 	struct list_head	flusher_overflow; /* WQ: flush overflow list */
250 
251 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
252 	struct worker		*rescuer;	/* MD: rescue worker */
253 
254 	int			nr_drainers;	/* WQ: drain in progress */
255 	int			saved_max_active; /* WQ: saved pwq max_active */
256 
257 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
258 	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
259 
260 #ifdef CONFIG_SYSFS
261 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
262 #endif
263 #ifdef CONFIG_LOCKDEP
264 	char			*lock_name;
265 	struct lock_class_key	key;
266 	struct lockdep_map	lockdep_map;
267 #endif
268 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
269 
270 	/*
271 	 * Destruction of workqueue_struct is RCU protected to allow walking
272 	 * the workqueues list without grabbing wq_pool_mutex.
273 	 * This is used to dump all workqueues from sysrq.
274 	 */
275 	struct rcu_head		rcu;
276 
277 	/* hot fields used during command issue, aligned to cacheline */
278 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
279 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
280 	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
281 };
282 
283 static struct kmem_cache *pwq_cache;
284 
285 static cpumask_var_t *wq_numa_possible_cpumask;
286 					/* possible CPUs of each node */
287 
288 static bool wq_disable_numa;
289 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
290 
291 /* see the comment above the definition of WQ_POWER_EFFICIENT */
292 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
293 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
294 
295 static bool wq_online;			/* can kworkers be created yet? */
296 
297 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
298 
299 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
300 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
301 
302 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
303 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
304 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
305 /* wait for manager to go away */
306 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
307 
308 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
309 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
310 
311 /* PL: allowable cpus for unbound wqs and work items */
312 static cpumask_var_t wq_unbound_cpumask;
313 
314 /* CPU where unbound work was last round robin scheduled from this CPU */
315 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
316 
317 /*
318  * Local execution of unbound work items is no longer guaranteed.  The
319  * following always forces round-robin CPU selection on unbound work items
320  * to uncover usages which depend on it.
321  */
322 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
323 static bool wq_debug_force_rr_cpu = true;
324 #else
325 static bool wq_debug_force_rr_cpu = false;
326 #endif
327 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
328 
329 /* the per-cpu worker pools */
330 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
331 
332 static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
333 
334 /* PL: hash of all unbound pools keyed by pool->attrs */
335 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
336 
337 /* I: attributes used when instantiating standard unbound pools on demand */
338 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
339 
340 /* I: attributes used when instantiating ordered pools on demand */
341 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
342 
343 struct workqueue_struct *system_wq __read_mostly;
344 EXPORT_SYMBOL(system_wq);
345 struct workqueue_struct *system_highpri_wq __read_mostly;
346 EXPORT_SYMBOL_GPL(system_highpri_wq);
347 struct workqueue_struct *system_long_wq __read_mostly;
348 EXPORT_SYMBOL_GPL(system_long_wq);
349 struct workqueue_struct *system_unbound_wq __read_mostly;
350 EXPORT_SYMBOL_GPL(system_unbound_wq);
351 struct workqueue_struct *system_freezable_wq __read_mostly;
352 EXPORT_SYMBOL_GPL(system_freezable_wq);
353 struct workqueue_struct *system_power_efficient_wq __read_mostly;
354 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
355 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
356 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
357 
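/*
 * Illustrative usage sketch (hypothetical names): queueing a work item on
 * one of the system workqueues exported above.  example_fn and example_work
 * are made up for illustration and are not part of the workqueue API.
 */
static void example_fn(struct work_struct *work)
{
	pr_info("example work item ran in process context\n");
}

static DECLARE_WORK(example_work, example_fn);

static void __maybe_unused example_use_system_wq(void)
{
	/* unbound: let the scheduler pick any allowed CPU */
	queue_work(system_unbound_wq, &example_work);
	flush_work(&example_work);
}
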
358 static int worker_thread(void *__worker);
359 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
360 static void show_pwq(struct pool_workqueue *pwq);
361 
362 #define CREATE_TRACE_POINTS
363 #include <trace/events/workqueue.h>
364 
365 #define assert_rcu_or_pool_mutex()					\
366 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
367 			 !lockdep_is_held(&wq_pool_mutex),		\
368 			 "RCU or wq_pool_mutex should be held")
369 
370 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
371 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
372 			 !lockdep_is_held(&wq->mutex) &&		\
373 			 !lockdep_is_held(&wq_pool_mutex),		\
374 			 "RCU, wq->mutex or wq_pool_mutex should be held")
375 
376 #define for_each_cpu_worker_pool(pool, cpu)				\
377 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
378 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
379 	     (pool)++)
380 
381 /**
382  * for_each_pool - iterate through all worker_pools in the system
383  * @pool: iteration cursor
384  * @pi: integer used for iteration
385  *
386  * This must be called either with wq_pool_mutex held or RCU read
387  * locked.  If the pool needs to be used beyond the locking in effect, the
388  * caller is responsible for guaranteeing that the pool stays online.
389  *
390  * The if/else clause exists only for the lockdep assertion and can be
391  * ignored.
392  */
393 #define for_each_pool(pool, pi)						\
394 	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
395 		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
396 		else
397 
398 /**
399  * for_each_pool_worker - iterate through all workers of a worker_pool
400  * @worker: iteration cursor
401  * @pool: worker_pool to iterate workers of
402  *
403  * This must be called with wq_pool_attach_mutex held.
404  *
405  * The if/else clause exists only for the lockdep assertion and can be
406  * ignored.
407  */
408 #define for_each_pool_worker(worker, pool)				\
409 	list_for_each_entry((worker), &(pool)->workers, node)		\
410 		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
411 		else
412 
413 /**
414  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
415  * @pwq: iteration cursor
416  * @wq: the target workqueue
417  *
418  * This must be called either with wq->mutex held or RCU read locked.
419  * If the pwq needs to be used beyond the locking in effect, the caller is
420  * responsible for guaranteeing that the pwq stays online.
421  *
422  * The if/else clause exists only for the lockdep assertion and can be
423  * ignored.
424  */
425 #define for_each_pwq(pwq, wq)						\
426 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
427 				 lockdep_is_held(&(wq->mutex)))
428 
429 #ifdef CONFIG_DEBUG_OBJECTS_WORK
430 
431 static const struct debug_obj_descr work_debug_descr;
432 
433 static void *work_debug_hint(void *addr)
434 {
435 	return ((struct work_struct *) addr)->func;
436 }
437 
438 static bool work_is_static_object(void *addr)
439 {
440 	struct work_struct *work = addr;
441 
442 	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
443 }
444 
445 /*
446  * fixup_init is called when:
447  * - an active object is initialized
448  */
449 static bool work_fixup_init(void *addr, enum debug_obj_state state)
450 {
451 	struct work_struct *work = addr;
452 
453 	switch (state) {
454 	case ODEBUG_STATE_ACTIVE:
455 		cancel_work_sync(work);
456 		debug_object_init(work, &work_debug_descr);
457 		return true;
458 	default:
459 		return false;
460 	}
461 }
462 
463 /*
464  * fixup_free is called when:
465  * - an active object is freed
466  */
467 static bool work_fixup_free(void *addr, enum debug_obj_state state)
468 {
469 	struct work_struct *work = addr;
470 
471 	switch (state) {
472 	case ODEBUG_STATE_ACTIVE:
473 		cancel_work_sync(work);
474 		debug_object_free(work, &work_debug_descr);
475 		return true;
476 	default:
477 		return false;
478 	}
479 }
480 
481 static const struct debug_obj_descr work_debug_descr = {
482 	.name		= "work_struct",
483 	.debug_hint	= work_debug_hint,
484 	.is_static_object = work_is_static_object,
485 	.fixup_init	= work_fixup_init,
486 	.fixup_free	= work_fixup_free,
487 };
488 
489 static inline void debug_work_activate(struct work_struct *work)
490 {
491 	debug_object_activate(work, &work_debug_descr);
492 }
493 
494 static inline void debug_work_deactivate(struct work_struct *work)
495 {
496 	debug_object_deactivate(work, &work_debug_descr);
497 }
498 
499 void __init_work(struct work_struct *work, int onstack)
500 {
501 	if (onstack)
502 		debug_object_init_on_stack(work, &work_debug_descr);
503 	else
504 		debug_object_init(work, &work_debug_descr);
505 }
506 EXPORT_SYMBOL_GPL(__init_work);
507 
508 void destroy_work_on_stack(struct work_struct *work)
509 {
510 	debug_object_free(work, &work_debug_descr);
511 }
512 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
513 
514 void destroy_delayed_work_on_stack(struct delayed_work *work)
515 {
516 	destroy_timer_on_stack(&work->timer);
517 	debug_object_free(&work->work, &work_debug_descr);
518 }
519 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
520 
521 #else
522 static inline void debug_work_activate(struct work_struct *work) { }
523 static inline void debug_work_deactivate(struct work_struct *work) { }
524 #endif
525 
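/*
 * A minimal sketch of the on-stack pattern the debug-object hooks above
 * support.  example_onstack_fn is a hypothetical work function; the point
 * is the INIT_WORK_ONSTACK/destroy_work_on_stack pairing.
 */
static void example_onstack_fn(struct work_struct *work)
{
	pr_info("on-stack example work ran\n");
}

static void __maybe_unused example_onstack_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_onstack_fn);
	schedule_work(&work);
	flush_work(&work);	/* must finish before the stack frame goes away */
	destroy_work_on_stack(&work);
}
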
526 /**
527  * worker_pool_assign_id - allocate ID and assign it to @pool
528  * @pool: the pool pointer of interest
529  *
530  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
531  * successfully, -errno on failure.
532  */
533 static int worker_pool_assign_id(struct worker_pool *pool)
534 {
535 	int ret;
536 
537 	lockdep_assert_held(&wq_pool_mutex);
538 
539 	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
540 			GFP_KERNEL);
541 	if (ret >= 0) {
542 		pool->id = ret;
543 		return 0;
544 	}
545 	return ret;
546 }
547 
548 /**
549  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
550  * @wq: the target workqueue
551  * @node: the node ID
552  *
553  * This must be called with any of wq_pool_mutex, wq->mutex or RCU
554  * read locked.
555  * If the pwq needs to be used beyond the locking in effect, the caller is
556  * responsible for guaranteeing that the pwq stays online.
557  *
558  * Return: The unbound pool_workqueue for @node.
559  */
560 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
561 						  int node)
562 {
563 	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
564 
565 	/*
566 	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
567 	 * delayed item is pending.  The plan is to keep CPU -> NODE
568 	 * mapping valid and stable across CPU on/offlines.  Once that
569 	 * happens, this workaround can be removed.
570 	 */
571 	if (unlikely(node == NUMA_NO_NODE))
572 		return wq->dfl_pwq;
573 
574 	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
575 }
576 
577 static unsigned int work_color_to_flags(int color)
578 {
579 	return color << WORK_STRUCT_COLOR_SHIFT;
580 }
581 
582 static int get_work_color(struct work_struct *work)
583 {
584 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
585 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
586 }
587 
588 static int work_next_color(int color)
589 {
590 	return (color + 1) % WORK_NR_COLORS;
591 }
592 
593 /*
594  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
595  * contain the pointer to the queued pwq.  Once execution starts, the flag
596  * is cleared and the high bits contain OFFQ flags and pool ID.
597  *
598  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
599  * and clear_work_data() can be used to set the pwq, pool or clear
600  * work->data.  These functions should only be called while the work is
601  * owned - ie. while the PENDING bit is set.
602  *
603  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
604  * corresponding to a work.  Pool is available once the work has been
605  * queued anywhere after initialization until it is sync canceled.  pwq is
606  * available only while the work item is queued.
607  *
608  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
609  * canceled.  While being canceled, a work item may have its PENDING set
610  * but stay off timer and worklist for arbitrarily long and nobody should
611  * try to steal the PENDING bit.
612  */
613 static inline void set_work_data(struct work_struct *work, unsigned long data,
614 				 unsigned long flags)
615 {
616 	WARN_ON_ONCE(!work_pending(work));
617 	atomic_long_set(&work->data, data | flags | work_static(work));
618 }
619 
620 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
621 			 unsigned long extra_flags)
622 {
623 	set_work_data(work, (unsigned long)pwq,
624 		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
625 }
626 
627 static void set_work_pool_and_keep_pending(struct work_struct *work,
628 					   int pool_id)
629 {
630 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
631 		      WORK_STRUCT_PENDING);
632 }
633 
634 static void set_work_pool_and_clear_pending(struct work_struct *work,
635 					    int pool_id)
636 {
637 	/*
638 	 * The following wmb is paired with the implied mb in
639 	 * test_and_set_bit(PENDING) and ensures all updates to @work made
640 	 * here are visible to and precede any updates by the next PENDING
641 	 * owner.
642 	 */
643 	smp_wmb();
644 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
645 	/*
646 	 * The following mb guarantees that previous clear of a PENDING bit
647 	 * will not be reordered with any speculative LOADS or STORES from
648 	 * work->current_func, which is executed afterwards.  This possible
649 	 * reordering can lead to a missed execution on attempt to queue
650 	 * the same @work.  E.g. consider this case:
651 	 *
652 	 *   CPU#0                         CPU#1
653 	 *   ----------------------------  --------------------------------
654 	 *
655 	 * 1  STORE event_indicated
656 	 * 2  queue_work_on() {
657 	 * 3    test_and_set_bit(PENDING)
658 	 * 4 }                             set_..._and_clear_pending() {
659 	 * 5                                 set_work_data() # clear bit
660 	 * 6                                 smp_mb()
661 	 * 7                               work->current_func() {
662 	 * 8				      LOAD event_indicated
663 	 *				   }
664 	 *
665 	 * Without an explicit full barrier speculative LOAD on line 8 can
666 	 * be executed before CPU#0 does STORE on line 1.  If that happens,
667 	 * CPU#0 observes the PENDING bit is still set and new execution of
668 	 * a @work is not queued in the hope that CPU#1 will eventually
669 	 * finish the queued @work.  Meanwhile CPU#1 does not see
670 	 * event_indicated is set, because speculative LOAD was executed
671 	 * before actual STORE.
672 	 */
673 	smp_mb();
674 }
675 
676 static void clear_work_data(struct work_struct *work)
677 {
678 	smp_wmb();	/* see set_work_pool_and_clear_pending() */
679 	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
680 }
681 
682 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
683 {
684 	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
685 }
686 
687 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
688 {
689 	unsigned long data = atomic_long_read(&work->data);
690 
691 	if (data & WORK_STRUCT_PWQ)
692 		return work_struct_pwq(data);
693 	else
694 		return NULL;
695 }
696 
697 /**
698  * get_work_pool - return the worker_pool a given work was associated with
699  * @work: the work item of interest
700  *
701  * Pools are created and destroyed under wq_pool_mutex, and allow read
702  * access under RCU read lock.  As such, this function should be
703  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
704  *
705  * All fields of the returned pool are accessible as long as the above
706  * mentioned locking is in effect.  If the returned pool needs to be used
707  * beyond the critical section, the caller is responsible for ensuring the
708  * returned pool is and stays online.
709  *
710  * Return: The worker_pool @work was last associated with.  %NULL if none.
711  */
712 static struct worker_pool *get_work_pool(struct work_struct *work)
713 {
714 	unsigned long data = atomic_long_read(&work->data);
715 	int pool_id;
716 
717 	assert_rcu_or_pool_mutex();
718 
719 	if (data & WORK_STRUCT_PWQ)
720 		return work_struct_pwq(data)->pool;
721 
722 	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
723 	if (pool_id == WORK_OFFQ_POOL_NONE)
724 		return NULL;
725 
726 	return idr_find(&worker_pool_idr, pool_id);
727 }
728 
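/*
 * A sketch of the locking rule described above (hypothetical helper): the
 * returned pool may only be dereferenced while wq_pool_mutex or the RCU
 * read lock is held.
 */
static bool __maybe_unused example_work_has_pool(struct work_struct *work)
{
	struct worker_pool *pool;
	bool ret;

	rcu_read_lock();
	pool = get_work_pool(work);
	ret = pool != NULL;
	rcu_read_unlock();

	return ret;
}
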
729 /**
730  * get_work_pool_id - return the worker pool ID a given work is associated with
731  * @work: the work item of interest
732  *
733  * Return: The worker_pool ID @work was last associated with.
734  * %WORK_OFFQ_POOL_NONE if none.
735  */
736 static int get_work_pool_id(struct work_struct *work)
737 {
738 	unsigned long data = atomic_long_read(&work->data);
739 
740 	if (data & WORK_STRUCT_PWQ)
741 		return work_struct_pwq(data)->pool->id;
742 
743 	return data >> WORK_OFFQ_POOL_SHIFT;
744 }
745 
746 static void mark_work_canceling(struct work_struct *work)
747 {
748 	unsigned long pool_id = get_work_pool_id(work);
749 
750 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
751 	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
752 }
753 
754 static bool work_is_canceling(struct work_struct *work)
755 {
756 	unsigned long data = atomic_long_read(&work->data);
757 
758 	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
759 }
760 
761 /*
762  * Policy functions.  These define the policies on how the global worker
763  * pools are managed.  Unless noted otherwise, these functions assume that
764  * they're being called with pool->lock held.
765  */
766 
767 static bool __need_more_worker(struct worker_pool *pool)
768 {
769 	return !atomic_read(&pool->nr_running);
770 }
771 
772 /*
773  * Need to wake up a worker?  Called from anything but currently
774  * running workers.
775  *
776  * Note that, because unbound workers never contribute to nr_running, this
777  * function will always return %true for unbound pools as long as the
778  * worklist isn't empty.
779  */
780 static bool need_more_worker(struct worker_pool *pool)
781 {
782 	return !list_empty(&pool->worklist) && __need_more_worker(pool);
783 }
784 
785 /* Can I start working?  Called from busy but !running workers. */
786 static bool may_start_working(struct worker_pool *pool)
787 {
788 	return pool->nr_idle;
789 }
790 
791 /* Do I need to keep working?  Called from currently running workers. */
792 static bool keep_working(struct worker_pool *pool)
793 {
794 	return !list_empty(&pool->worklist) &&
795 		atomic_read(&pool->nr_running) <= 1;
796 }
797 
798 /* Do we need a new worker?  Called from manager. */
799 static bool need_to_create_worker(struct worker_pool *pool)
800 {
801 	return need_more_worker(pool) && !may_start_working(pool);
802 }
803 
804 /* Do we have too many workers and should some go away? */
805 static bool too_many_workers(struct worker_pool *pool)
806 {
807 	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
808 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
809 	int nr_busy = pool->nr_workers - nr_idle;
810 
811 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
812 }
813 
814 /*
815  * Wake up functions.
816  */
817 
818 /* Return the first idle worker.  Safe with preemption disabled */
819 static struct worker *first_idle_worker(struct worker_pool *pool)
820 {
821 	if (unlikely(list_empty(&pool->idle_list)))
822 		return NULL;
823 
824 	return list_first_entry(&pool->idle_list, struct worker, entry);
825 }
826 
827 /**
828  * wake_up_worker - wake up an idle worker
829  * @pool: worker pool to wake worker from
830  *
831  * Wake up the first idle worker of @pool.
832  *
833  * CONTEXT:
834  * raw_spin_lock_irq(pool->lock).
835  */
836 static void wake_up_worker(struct worker_pool *pool)
837 {
838 	struct worker *worker = first_idle_worker(pool);
839 
840 	if (likely(worker))
841 		wake_up_process(worker->task);
842 }
843 
844 /**
845  * wq_worker_running - a worker is running again
846  * @task: task waking up
847  *
848  * This function is called when a worker returns from schedule()
849  */
850 void wq_worker_running(struct task_struct *task)
851 {
852 	struct worker *worker = kthread_data(task);
853 
854 	if (!worker->sleeping)
855 		return;
856 
857 	/*
858 	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
859 	 * and the nr_running increment below, we may ruin the nr_running reset
860 	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
861 	 * pool. Protect against such race.
862 	 */
863 	preempt_disable();
864 	if (!(worker->flags & WORKER_NOT_RUNNING))
865 		atomic_inc(&worker->pool->nr_running);
866 	preempt_enable();
867 	worker->sleeping = 0;
868 }
869 
870 /**
871  * wq_worker_sleeping - a worker is going to sleep
872  * @task: task going to sleep
873  *
874  * This function is called from schedule() when a busy worker is
875  * going to sleep. Preemption needs to be disabled to protect ->sleeping
876  * assignment.
877  */
878 void wq_worker_sleeping(struct task_struct *task)
879 {
880 	struct worker *next, *worker = kthread_data(task);
881 	struct worker_pool *pool;
882 
883 	/*
884 	 * Rescuers, which may not have all the fields set up like normal
885 	 * workers, also reach here, let's not access anything before
886 	 * checking NOT_RUNNING.
887 	 */
888 	if (worker->flags & WORKER_NOT_RUNNING)
889 		return;
890 
891 	pool = worker->pool;
892 
893 	/* Return if preempted before wq_worker_running() was reached */
894 	if (worker->sleeping)
895 		return;
896 
897 	worker->sleeping = 1;
898 	raw_spin_lock_irq(&pool->lock);
899 
900 	/*
901 	 * The counterpart of the following dec_and_test, implied mb,
902 	 * worklist not empty test sequence is in insert_work().
903 	 * Please read comment there.
904 	 *
905 	 * NOT_RUNNING is clear.  This means that we're bound to and
906 	 * running on the local cpu w/ rq lock held and preemption
907 	 * disabled, which in turn means that no one else could be
908 	 * manipulating idle_list, so dereferencing idle_list without pool
909 	 * lock is safe.
910 	 */
911 	if (atomic_dec_and_test(&pool->nr_running) &&
912 	    !list_empty(&pool->worklist)) {
913 		next = first_idle_worker(pool);
914 		if (next)
915 			wake_up_process(next->task);
916 	}
917 	raw_spin_unlock_irq(&pool->lock);
918 }
919 
920 /**
921  * wq_worker_last_func - retrieve worker's last work function
922  * @task: Task to retrieve last work function of.
923  *
924  * Determine the last function a worker executed. This is called from
925  * the scheduler to get a worker's last known identity.
926  *
927  * CONTEXT:
928  * raw_spin_lock_irq(rq->lock)
929  *
930  * This function is called during schedule() when a kworker is going
931  * to sleep. It's used by psi to identify aggregation workers during
932  * dequeuing, to allow periodic aggregation to shut off when that
933  * worker is the last task in the system or cgroup to go to sleep.
934  *
935  * As this function doesn't involve any workqueue-related locking, it
936  * only returns stable values when called from inside the scheduler's
937  * queuing and dequeuing paths, when @task, which must be a kworker,
938  * is guaranteed to not be processing any works.
939  *
940  * Return:
941  * The last work function %current executed as a worker, NULL if it
942  * hasn't executed any work yet.
943  */
944 work_func_t wq_worker_last_func(struct task_struct *task)
945 {
946 	struct worker *worker = kthread_data(task);
947 
948 	return worker->last_func;
949 }
950 
951 /**
952  * worker_set_flags - set worker flags and adjust nr_running accordingly
953  * @worker: self
954  * @flags: flags to set
955  *
956  * Set @flags in @worker->flags and adjust nr_running accordingly.
957  *
958  * CONTEXT:
959  * raw_spin_lock_irq(pool->lock)
960  */
961 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
962 {
963 	struct worker_pool *pool = worker->pool;
964 
965 	WARN_ON_ONCE(worker->task != current);
966 
967 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
968 	if ((flags & WORKER_NOT_RUNNING) &&
969 	    !(worker->flags & WORKER_NOT_RUNNING)) {
970 		atomic_dec(&pool->nr_running);
971 	}
972 
973 	worker->flags |= flags;
974 }
975 
976 /**
977  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
978  * @worker: self
979  * @flags: flags to clear
980  *
981  * Clear @flags in @worker->flags and adjust nr_running accordingly.
982  *
983  * CONTEXT:
984  * raw_spin_lock_irq(pool->lock)
985  */
986 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
987 {
988 	struct worker_pool *pool = worker->pool;
989 	unsigned int oflags = worker->flags;
990 
991 	WARN_ON_ONCE(worker->task != current);
992 
993 	worker->flags &= ~flags;
994 
995 	/*
996 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
997 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
998 	 * of multiple flags, not a single flag.
999 	 */
1000 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1001 		if (!(worker->flags & WORKER_NOT_RUNNING))
1002 			atomic_inc(&pool->nr_running);
1003 }
1004 
1005 /**
1006  * find_worker_executing_work - find worker which is executing a work
1007  * @pool: pool of interest
1008  * @work: work to find worker for
1009  *
1010  * Find a worker which is executing @work on @pool by searching
1011  * @pool->busy_hash which is keyed by the address of @work.  For a worker
1012  * to match, its current execution should match the address of @work and
1013  * its work function.  This is to avoid unwanted dependency between
1014  * unrelated work executions through a work item being recycled while still
1015  * being executed.
1016  *
1017  * This is a bit tricky.  A work item may be freed once its execution
1018  * starts and nothing prevents the freed area from being recycled for
1019  * another work item.  If the same work item address ends up being reused
1020  * before the original execution finishes, workqueue will identify the
1021  * recycled work item as currently executing and make it wait until the
1022  * current execution finishes, introducing an unwanted dependency.
1023  *
1024  * This function checks the work item address and work function to avoid
1025  * false positives.  Note that this isn't complete as one may construct a
1026  * work function which can introduce dependency onto itself through a
1027  * recycled work item.  Well, if somebody wants to shoot oneself in the
1028  * foot that badly, there's only so much we can do, and if such deadlock
1029  * actually occurs, it should be easy to locate the culprit work function.
1030  *
1031  * CONTEXT:
1032  * raw_spin_lock_irq(pool->lock).
1033  *
1034  * Return:
1035  * Pointer to worker which is executing @work if found, %NULL
1036  * otherwise.
1037  */
1038 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1039 						 struct work_struct *work)
1040 {
1041 	struct worker *worker;
1042 
1043 	hash_for_each_possible(pool->busy_hash, worker, hentry,
1044 			       (unsigned long)work)
1045 		if (worker->current_work == work &&
1046 		    worker->current_func == work->func)
1047 			return worker;
1048 
1049 	return NULL;
1050 }
1051 
1052 /**
1053  * move_linked_works - move linked works to a list
1054  * @work: start of series of works to be scheduled
1055  * @head: target list to append @work to
1056  * @nextp: out parameter for nested worklist walking
1057  *
1058  * Schedule linked works starting from @work to @head.  Work series to
1059  * be scheduled starts at @work and includes any consecutive work with
1060  * WORK_STRUCT_LINKED set in its predecessor.
1061  *
1062  * If @nextp is not NULL, it's updated to point to the next work of
1063  * the last scheduled work.  This allows move_linked_works() to be
1064  * nested inside outer list_for_each_entry_safe().
1065  *
1066  * CONTEXT:
1067  * raw_spin_lock_irq(pool->lock).
1068  */
1069 static void move_linked_works(struct work_struct *work, struct list_head *head,
1070 			      struct work_struct **nextp)
1071 {
1072 	struct work_struct *n;
1073 
1074 	/*
1075 	 * Linked worklist will always end before the end of the list,
1076 	 * use NULL for list head.
1077 	 */
1078 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1079 		list_move_tail(&work->entry, head);
1080 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1081 			break;
1082 	}
1083 
1084 	/*
1085 	 * If we're already inside safe list traversal and have moved
1086 	 * multiple works to the scheduled queue, the next position
1087 	 * needs to be updated.
1088 	 */
1089 	if (nextp)
1090 		*nextp = n;
1091 }
1092 
1093 /**
1094  * get_pwq - get an extra reference on the specified pool_workqueue
1095  * @pwq: pool_workqueue to get
1096  *
1097  * Obtain an extra reference on @pwq.  The caller should guarantee that
1098  * @pwq has positive refcnt and be holding the matching pool->lock.
1099  */
1100 static void get_pwq(struct pool_workqueue *pwq)
1101 {
1102 	lockdep_assert_held(&pwq->pool->lock);
1103 	WARN_ON_ONCE(pwq->refcnt <= 0);
1104 	pwq->refcnt++;
1105 }
1106 
1107 /**
1108  * put_pwq - put a pool_workqueue reference
1109  * @pwq: pool_workqueue to put
1110  *
1111  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1112  * destruction.  The caller should be holding the matching pool->lock.
1113  */
1114 static void put_pwq(struct pool_workqueue *pwq)
1115 {
1116 	lockdep_assert_held(&pwq->pool->lock);
1117 	if (likely(--pwq->refcnt))
1118 		return;
1119 	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1120 		return;
1121 	/*
1122 	 * @pwq can't be released under pool->lock, bounce to
1123 	 * pwq_unbound_release_workfn().  This never recurses on the same
1124 	 * pool->lock as this path is taken only for unbound workqueues and
1125 	 * the release work item is scheduled on a per-cpu workqueue.  To
1126 	 * avoid lockdep warning, unbound pool->locks are given lockdep
1127 	 * subclass of 1 in get_unbound_pool().
1128 	 */
1129 	schedule_work(&pwq->unbound_release_work);
1130 }
1131 
1132 /**
1133  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1134  * @pwq: pool_workqueue to put (can be %NULL)
1135  *
1136  * put_pwq() with locking.  This function also allows %NULL @pwq.
1137  */
1138 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1139 {
1140 	if (pwq) {
1141 		/*
1142 		 * As both pwqs and pools are RCU protected, the
1143 		 * following lock operations are safe.
1144 		 */
1145 		raw_spin_lock_irq(&pwq->pool->lock);
1146 		put_pwq(pwq);
1147 		raw_spin_unlock_irq(&pwq->pool->lock);
1148 	}
1149 }
1150 
1151 static void pwq_activate_inactive_work(struct work_struct *work)
1152 {
1153 	struct pool_workqueue *pwq = get_work_pwq(work);
1154 
1155 	trace_workqueue_activate_work(work);
1156 	if (list_empty(&pwq->pool->worklist))
1157 		pwq->pool->watchdog_ts = jiffies;
1158 	move_linked_works(work, &pwq->pool->worklist, NULL);
1159 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1160 	pwq->nr_active++;
1161 }
1162 
1163 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1164 {
1165 	struct work_struct *work = list_first_entry(&pwq->inactive_works,
1166 						    struct work_struct, entry);
1167 
1168 	pwq_activate_inactive_work(work);
1169 }
1170 
1171 /**
1172  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1173  * @pwq: pwq of interest
1174  * @color: color of work which left the queue
1175  *
1176  * A work either has completed or is removed from pending queue,
1177  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1178  *
1179  * CONTEXT:
1180  * raw_spin_lock_irq(pool->lock).
1181  */
1182 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1183 {
1184 	/* uncolored work items don't participate in flushing or nr_active */
1185 	if (color == WORK_NO_COLOR)
1186 		goto out_put;
1187 
1188 	pwq->nr_in_flight[color]--;
1189 
1190 	pwq->nr_active--;
1191 	if (!list_empty(&pwq->inactive_works)) {
1192 		/* one down, submit an inactive one */
1193 		if (pwq->nr_active < pwq->max_active)
1194 			pwq_activate_first_inactive(pwq);
1195 	}
1196 
1197 	/* is flush in progress and are we at the flushing tip? */
1198 	if (likely(pwq->flush_color != color))
1199 		goto out_put;
1200 
1201 	/* are there still in-flight works? */
1202 	if (pwq->nr_in_flight[color])
1203 		goto out_put;
1204 
1205 	/* this pwq is done, clear flush_color */
1206 	pwq->flush_color = -1;
1207 
1208 	/*
1209 	 * If this was the last pwq, wake up the first flusher.  It
1210 	 * will handle the rest.
1211 	 */
1212 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1213 		complete(&pwq->wq->first_flusher->done);
1214 out_put:
1215 	put_pwq(pwq);
1216 }
1217 
1218 /**
1219  * try_to_grab_pending - steal work item from worklist and disable irq
1220  * @work: work item to steal
1221  * @is_dwork: @work is a delayed_work
1222  * @flags: place to store irq state
1223  *
1224  * Try to grab PENDING bit of @work.  This function can handle @work in any
1225  * stable state - idle, on timer or on worklist.
1226  *
1227  * Return:
1228  *
1229  *  ========	================================================================
1230  *  1		if @work was pending and we successfully stole PENDING
1231  *  0		if @work was idle and we claimed PENDING
1232  *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1233  *  -ENOENT	if someone else is canceling @work, this state may persist
1234  *		for arbitrarily long
1235  *  ========	================================================================
1236  *
1237  * Note:
1238  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1239  * interrupted while holding PENDING and @work off queue, irq must be
1240  * disabled on entry.  This, combined with delayed_work->timer being
1241  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1242  *
1243  * On successful return, >= 0, irq is disabled and the caller is
1244  * responsible for releasing it using local_irq_restore(*@flags).
1245  *
1246  * This function is safe to call from any context including IRQ handler.
1247  */
1248 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1249 			       unsigned long *flags)
1250 {
1251 	struct worker_pool *pool;
1252 	struct pool_workqueue *pwq;
1253 
1254 	local_irq_save(*flags);
1255 
1256 	/* try to steal the timer if it exists */
1257 	if (is_dwork) {
1258 		struct delayed_work *dwork = to_delayed_work(work);
1259 
1260 		/*
1261 		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1262 		 * guaranteed that the timer is not queued anywhere and not
1263 		 * running on the local CPU.
1264 		 */
1265 		if (likely(del_timer(&dwork->timer)))
1266 			return 1;
1267 	}
1268 
1269 	/* try to claim PENDING the normal way */
1270 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1271 		return 0;
1272 
1273 	rcu_read_lock();
1274 	/*
1275 	 * The queueing is in progress, or it is already queued. Try to
1276 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1277 	 */
1278 	pool = get_work_pool(work);
1279 	if (!pool)
1280 		goto fail;
1281 
1282 	raw_spin_lock(&pool->lock);
1283 	/*
1284 	 * work->data is guaranteed to point to pwq only while the work
1285 	 * item is queued on pwq->wq, and both updating work->data to point
1286 	 * to pwq on queueing and to pool on dequeueing are done under
1287 	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1288 	 * points to pwq which is associated with a locked pool, the work
1289 	 * item is currently queued on that pool.
1290 	 */
1291 	pwq = get_work_pwq(work);
1292 	if (pwq && pwq->pool == pool) {
1293 		debug_work_deactivate(work);
1294 
1295 		/*
1296 		 * An inactive work item cannot be grabbed directly because
1297 		 * it might have linked NO_COLOR work items which, if left
1298 		 * on the inactive_works list, will confuse pwq->nr_active
1299 		 * management later on and cause stall.  Make sure the work
1300 		 * item is activated before grabbing.
1301 		 */
1302 		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1303 			pwq_activate_inactive_work(work);
1304 
1305 		list_del_init(&work->entry);
1306 		pwq_dec_nr_in_flight(pwq, get_work_color(work));
1307 
1308 		/* work->data points to pwq iff queued, point to pool */
1309 		set_work_pool_and_keep_pending(work, pool->id);
1310 
1311 		raw_spin_unlock(&pool->lock);
1312 		rcu_read_unlock();
1313 		return 1;
1314 	}
1315 	raw_spin_unlock(&pool->lock);
1316 fail:
1317 	rcu_read_unlock();
1318 	local_irq_restore(*flags);
1319 	if (work_is_canceling(work))
1320 		return -ENOENT;
1321 	cpu_relax();
1322 	return -EAGAIN;
1323 }
1324 
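/*
 * Sketch of the caller-side pattern for the above (not an actual caller):
 * cancellation paths later in this file loop on -EAGAIN roughly like this
 * before doing anything with the grabbed PENDING bit.
 */
static bool __maybe_unused example_grab_pending(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, false, &flags);
	} while (ret == -EAGAIN);

	if (ret < 0)	/* -ENOENT: someone else is canceling @work */
		return false;

	/*
	 * We now own PENDING with irqs disabled.  A real caller would
	 * requeue or clear the work item here before re-enabling irqs.
	 */
	local_irq_restore(flags);
	return true;
}
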
1325 /**
1326  * insert_work - insert a work into a pool
1327  * @pwq: pwq @work belongs to
1328  * @work: work to insert
1329  * @head: insertion point
1330  * @extra_flags: extra WORK_STRUCT_* flags to set
1331  *
1332  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1333  * work_struct flags.
1334  *
1335  * CONTEXT:
1336  * raw_spin_lock_irq(pool->lock).
1337  */
1338 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1339 			struct list_head *head, unsigned int extra_flags)
1340 {
1341 	struct worker_pool *pool = pwq->pool;
1342 
1343 	/* we own @work, set data and link */
1344 	set_work_pwq(work, pwq, extra_flags);
1345 	list_add_tail(&work->entry, head);
1346 	get_pwq(pwq);
1347 
1348 	/*
1349 	 * Ensure either wq_worker_sleeping() sees the above
1350 	 * list_add_tail() or we see zero nr_running to avoid workers lying
1351 	 * around lazily while there are works to be processed.
1352 	 */
1353 	smp_mb();
1354 
1355 	if (__need_more_worker(pool))
1356 		wake_up_worker(pool);
1357 }
1358 
1359 /*
1360  * Test whether @work is being queued from another work executing on the
1361  * same workqueue.
1362  */
1363 static bool is_chained_work(struct workqueue_struct *wq)
1364 {
1365 	struct worker *worker;
1366 
1367 	worker = current_wq_worker();
1368 	/*
1369 	 * Return %true iff I'm a worker executing a work item on @wq.  If
1370 	 * I'm @worker, it's safe to dereference it without locking.
1371 	 */
1372 	return worker && worker->current_pwq->wq == wq;
1373 }
1374 
1375 /*
1376  * When queueing an unbound work item to a wq, prefer local CPU if allowed
1377  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1378  * avoid perturbing sensitive tasks.
1379  */
1380 static int wq_select_unbound_cpu(int cpu)
1381 {
1382 	static bool printed_dbg_warning;
1383 	int new_cpu;
1384 
1385 	if (likely(!wq_debug_force_rr_cpu)) {
1386 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1387 			return cpu;
1388 	} else if (!printed_dbg_warning) {
1389 		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1390 		printed_dbg_warning = true;
1391 	}
1392 
1393 	if (cpumask_empty(wq_unbound_cpumask))
1394 		return cpu;
1395 
1396 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
1397 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1398 	if (unlikely(new_cpu >= nr_cpu_ids)) {
1399 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1400 		if (unlikely(new_cpu >= nr_cpu_ids))
1401 			return cpu;
1402 	}
1403 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
1404 
1405 	return new_cpu;
1406 }
1407 
1408 static void __queue_work(int cpu, struct workqueue_struct *wq,
1409 			 struct work_struct *work)
1410 {
1411 	struct pool_workqueue *pwq;
1412 	struct worker_pool *last_pool;
1413 	struct list_head *worklist;
1414 	unsigned int work_flags;
1415 	unsigned int req_cpu = cpu;
1416 
1417 	/*
1418 	 * While a work item is PENDING && off queue, a task trying to
1419 	 * steal the PENDING will busy-loop waiting for it to either get
1420 	 * queued or lose PENDING.  Grabbing PENDING and queueing should
1421 	 * happen with IRQ disabled.
1422 	 */
1423 	lockdep_assert_irqs_disabled();
1424 
1425 
1426 	/* if draining, only works from the same workqueue are allowed */
1427 	if (unlikely(wq->flags & __WQ_DRAINING) &&
1428 	    WARN_ON_ONCE(!is_chained_work(wq)))
1429 		return;
1430 	rcu_read_lock();
1431 retry:
1432 	/* pwq which will be used unless @work is executing elsewhere */
1433 	if (wq->flags & WQ_UNBOUND) {
1434 		if (req_cpu == WORK_CPU_UNBOUND)
1435 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1436 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1437 	} else {
1438 		if (req_cpu == WORK_CPU_UNBOUND)
1439 			cpu = raw_smp_processor_id();
1440 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1441 	}
1442 
1443 	/*
1444 	 * If @work was previously on a different pool, it might still be
1445 	 * running there, in which case the work needs to be queued on that
1446 	 * pool to guarantee non-reentrancy.
1447 	 */
1448 	last_pool = get_work_pool(work);
1449 	if (last_pool && last_pool != pwq->pool) {
1450 		struct worker *worker;
1451 
1452 		raw_spin_lock(&last_pool->lock);
1453 
1454 		worker = find_worker_executing_work(last_pool, work);
1455 
1456 		if (worker && worker->current_pwq->wq == wq) {
1457 			pwq = worker->current_pwq;
1458 		} else {
1459 			/* meh... not running there, queue here */
1460 			raw_spin_unlock(&last_pool->lock);
1461 			raw_spin_lock(&pwq->pool->lock);
1462 		}
1463 	} else {
1464 		raw_spin_lock(&pwq->pool->lock);
1465 	}
1466 
1467 	/*
1468 	 * pwq is determined and locked.  For unbound pools, we could have
1469 	 * raced with pwq release and it could already be dead.  If its
1470 	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1471 	 * without another pwq replacing it in the numa_pwq_tbl or while
1472 	 * work items are executing on it, so the retrying is guaranteed to
1473 	 * make forward-progress.
1474 	 */
1475 	if (unlikely(!pwq->refcnt)) {
1476 		if (wq->flags & WQ_UNBOUND) {
1477 			raw_spin_unlock(&pwq->pool->lock);
1478 			cpu_relax();
1479 			goto retry;
1480 		}
1481 		/* oops */
1482 		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1483 			  wq->name, cpu);
1484 	}
1485 
1486 	/* pwq determined, queue */
1487 	trace_workqueue_queue_work(req_cpu, pwq, work);
1488 
1489 	if (WARN_ON(!list_empty(&work->entry)))
1490 		goto out;
1491 
1492 	pwq->nr_in_flight[pwq->work_color]++;
1493 	work_flags = work_color_to_flags(pwq->work_color);
1494 
1495 	if (likely(pwq->nr_active < pwq->max_active)) {
1496 		trace_workqueue_activate_work(work);
1497 		pwq->nr_active++;
1498 		worklist = &pwq->pool->worklist;
1499 		if (list_empty(worklist))
1500 			pwq->pool->watchdog_ts = jiffies;
1501 	} else {
1502 		work_flags |= WORK_STRUCT_INACTIVE;
1503 		worklist = &pwq->inactive_works;
1504 	}
1505 
1506 	debug_work_activate(work);
1507 	insert_work(pwq, work, worklist, work_flags);
1508 
1509 out:
1510 	raw_spin_unlock(&pwq->pool->lock);
1511 	rcu_read_unlock();
1512 }
1513 
1514 /**
1515  * queue_work_on - queue work on specific cpu
1516  * @cpu: CPU number to execute work on
1517  * @wq: workqueue to use
1518  * @work: work to queue
1519  *
1520  * We queue the work to a specific CPU, the caller must ensure it
1521  * can't go away.
1522  *
1523  * Return: %false if @work was already on a queue, %true otherwise.
1524  */
1525 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1526 		   struct work_struct *work)
1527 {
1528 	bool ret = false;
1529 	unsigned long flags;
1530 
1531 	local_irq_save(flags);
1532 
1533 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1534 		__queue_work(cpu, wq, work);
1535 		ret = true;
1536 	}
1537 
1538 	local_irq_restore(flags);
1539 	return ret;
1540 }
1541 EXPORT_SYMBOL(queue_work_on);
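
/*
 * Usage sketch (illustrative, not part of this file): pinning a work item
 * to a specific CPU.  my_work_fn and my_work are hypothetical caller-side
 * names; DECLARE_WORK, system_wq and queue_work_on() are the regular
 * workqueue API.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work running on cpu %d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work_on(0, system_wq, &my_work);
 *
 * The caller is responsible for keeping CPU 0 from going away while the
 * work is pending; otherwise plain queue_work() is usually preferable.
 */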
1542 
1543 /**
1544  * workqueue_select_cpu_near - Select a CPU based on NUMA node
1545  * @node: NUMA node ID that we want to select a CPU from
1546  *
1547  * This function will attempt to find a "random" cpu available on a given
1548  * node. If there are no CPUs available on the given node it will return
1549  * WORK_CPU_UNBOUND indicating that we should just schedule to any
1550  * available CPU if we need to schedule this work.
1551  */
1552 static int workqueue_select_cpu_near(int node)
1553 {
1554 	int cpu;
1555 
1556 	/* No point in doing this if NUMA isn't enabled for workqueues */
1557 	if (!wq_numa_enabled)
1558 		return WORK_CPU_UNBOUND;
1559 
1560 	/* Delay binding to CPU if node is not valid or online */
1561 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1562 		return WORK_CPU_UNBOUND;
1563 
1564 	/* Use local node/cpu if we are already there */
1565 	cpu = raw_smp_processor_id();
1566 	if (node == cpu_to_node(cpu))
1567 		return cpu;
1568 
1569 	/* Use "random" otherwise know as "first" online CPU of node */
1570 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1571 
1572 	/* If CPU is valid return that, otherwise just defer */
1573 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1574 }
1575 
1576 /**
1577  * queue_work_node - queue work on a "random" cpu for a given NUMA node
1578  * @node: NUMA node that we are targeting the work for
1579  * @wq: workqueue to use
1580  * @work: work to queue
1581  *
1582  * We queue the work to a "random" CPU within a given NUMA node. The basic
1583  * idea here is to provide a way to somehow associate work with a given
1584  * NUMA node.
1585  *
1586  * This function will only make a best effort attempt at getting this onto
1587  * the right NUMA node. If no node is requested or the requested node is
1588  * offline then we just fall back to standard queue_work behavior.
1589  *
1590  * Currently the "random" CPU ends up being the first available CPU in the
1591  * intersection of cpu_online_mask and the cpumask of the node, unless we
1592  * are running on the node. In that case we just use the current CPU.
1593  *
1594  * Return: %false if @work was already on a queue, %true otherwise.
1595  */
1596 bool queue_work_node(int node, struct workqueue_struct *wq,
1597 		     struct work_struct *work)
1598 {
1599 	unsigned long flags;
1600 	bool ret = false;
1601 
1602 	/*
1603 	 * This current implementation is specific to unbound workqueues.
1604 	 * Specifically we only return the first available CPU for a given
1605 	 * node instead of cycling through individual CPUs within the node.
1606 	 *
1607 	 * If this is used with a per-cpu workqueue then the logic in
1608 	 * workqueue_select_cpu_near would need to be updated to allow for
1609 	 * some round robin type logic.
1610 	 */
1611 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1612 
1613 	local_irq_save(flags);
1614 
1615 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1616 		int cpu = workqueue_select_cpu_near(node);
1617 
1618 		__queue_work(cpu, wq, work);
1619 		ret = true;
1620 	}
1621 
1622 	local_irq_restore(flags);
1623 	return ret;
1624 }
1625 EXPORT_SYMBOL_GPL(queue_work_node);
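
/*
 * Usage sketch (illustrative, not part of this file): queueing near a NUMA
 * node.  my_unbound_wq and my_work are hypothetical caller-side objects;
 * the workqueue must be WQ_UNBOUND as warned above.
 *
 *	struct workqueue_struct *my_unbound_wq =
 *		alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
 *
 *	if (my_unbound_wq)
 *		queue_work_node(numa_node_id(), my_unbound_wq, &my_work);
 */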
1626 
1627 void delayed_work_timer_fn(struct timer_list *t)
1628 {
1629 	struct delayed_work *dwork = from_timer(dwork, t, timer);
1630 
1631 	/* should have been called from irqsafe timer with irq already off */
1632 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
1633 }
1634 EXPORT_SYMBOL(delayed_work_timer_fn);
1635 
1636 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1637 				struct delayed_work *dwork, unsigned long delay)
1638 {
1639 	struct timer_list *timer = &dwork->timer;
1640 	struct work_struct *work = &dwork->work;
1641 
1642 	WARN_ON_ONCE(!wq);
1643 	WARN_ON_FUNCTION_MISMATCH(timer->function, delayed_work_timer_fn);
1644 	WARN_ON_ONCE(timer_pending(timer));
1645 	WARN_ON_ONCE(!list_empty(&work->entry));
1646 
1647 	/*
1648 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1649 	 * both optimization and correctness.  The earliest @timer can
1650 	 * expire is on the closest next tick and delayed_work users depend
1651 	 * on that there's no such delay when @delay is 0.
1652 	 */
1653 	if (!delay) {
1654 		__queue_work(cpu, wq, &dwork->work);
1655 		return;
1656 	}
1657 
1658 	dwork->wq = wq;
1659 	dwork->cpu = cpu;
1660 	timer->expires = jiffies + delay;
1661 
1662 	if (unlikely(cpu != WORK_CPU_UNBOUND))
1663 		add_timer_on(timer, cpu);
1664 	else
1665 		add_timer(timer);
1666 }
1667 
1668 /**
1669  * queue_delayed_work_on - queue work on specific CPU after delay
1670  * @cpu: CPU number to execute work on
1671  * @wq: workqueue to use
1672  * @dwork: work to queue
1673  * @delay: number of jiffies to wait before queueing
1674  *
1675  * Return: %false if @work was already on a queue, %true otherwise.  If
1676  * @delay is zero and @dwork is idle, it will be scheduled for immediate
1677  * execution.
1678  */
1679 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1680 			   struct delayed_work *dwork, unsigned long delay)
1681 {
1682 	struct work_struct *work = &dwork->work;
1683 	bool ret = false;
1684 	unsigned long flags;
1685 
1686 	/* read the comment in __queue_work() */
1687 	local_irq_save(flags);
1688 
1689 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1690 		__queue_delayed_work(cpu, wq, dwork, delay);
1691 		ret = true;
1692 	}
1693 
1694 	local_irq_restore(flags);
1695 	return ret;
1696 }
1697 EXPORT_SYMBOL(queue_delayed_work_on);
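
/*
 * Usage sketch (illustrative, not part of this file): deferring a
 * hypothetical my_dwork by one second.  Passing WORK_CPU_UNBOUND lets the
 * timer and the work run on whichever CPU is convenient.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		pr_info("my_dwork ran\n");
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork, HZ);
 */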
1698 
1699 /**
1700  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1701  * @cpu: CPU number to execute work on
1702  * @wq: workqueue to use
1703  * @dwork: work to queue
1704  * @delay: number of jiffies to wait before queueing
1705  *
1706  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1707  * modify @dwork's timer so that it expires after @delay.  If @delay is
1708  * zero, @work is guaranteed to be scheduled immediately regardless of its
1709  * current state.
1710  *
1711  * Return: %false if @dwork was idle and queued, %true if @dwork was
1712  * pending and its timer was modified.
1713  *
1714  * This function is safe to call from any context including IRQ handler.
1715  * See try_to_grab_pending() for details.
1716  */
1717 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1718 			 struct delayed_work *dwork, unsigned long delay)
1719 {
1720 	unsigned long flags;
1721 	int ret;
1722 
1723 	do {
1724 		ret = try_to_grab_pending(&dwork->work, true, &flags);
1725 	} while (unlikely(ret == -EAGAIN));
1726 
1727 	if (likely(ret >= 0)) {
1728 		__queue_delayed_work(cpu, wq, dwork, delay);
1729 		local_irq_restore(flags);
1730 	}
1731 
1732 	/* -ENOENT from try_to_grab_pending() becomes %true */
1733 	return ret;
1734 }
1735 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
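
/*
 * Usage sketch (illustrative, not part of this file): a common debounce
 * pattern using the hypothetical my_dwork above.  Every call pushes the
 * expiry out by another 100ms regardless of whether the work was already
 * pending, so the handler only runs once activity has stopped.
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *			    msecs_to_jiffies(100));
 */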
1736 
1737 static void rcu_work_rcufn(struct rcu_head *rcu)
1738 {
1739 	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1740 
1741 	/* read the comment in __queue_work() */
1742 	local_irq_disable();
1743 	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1744 	local_irq_enable();
1745 }
1746 
1747 /**
1748  * queue_rcu_work - queue work after a RCU grace period
1749  * @wq: workqueue to use
1750  * @rwork: work to queue
1751  *
1752  * Return: %false if @rwork was already pending, %true otherwise.  Note
1753  * that a full RCU grace period is guaranteed only after a %true return.
1754  * While @rwork is guaranteed to be executed after a %false return, the
1755  * execution may happen before a full RCU grace period has passed.
1756  */
1757 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1758 {
1759 	struct work_struct *work = &rwork->work;
1760 
1761 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1762 		rwork->wq = wq;
1763 		call_rcu(&rwork->rcu, rcu_work_rcufn);
1764 		return true;
1765 	}
1766 
1767 	return false;
1768 }
1769 EXPORT_SYMBOL(queue_rcu_work);
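
/*
 * Usage sketch (illustrative, not part of this file): running a
 * hypothetical my_rwork only after a full RCU grace period, e.g. to tear
 * down state that RCU readers may still be traversing.
 *
 *	static void my_rwork_fn(struct work_struct *work)
 *	{
 *		pr_info("all pre-existing RCU readers are done\n");
 *	}
 *	static struct rcu_work my_rwork;
 *
 *	INIT_RCU_WORK(&my_rwork, my_rwork_fn);
 *	queue_rcu_work(system_wq, &my_rwork);
 */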
1770 
1771 /**
1772  * worker_enter_idle - enter idle state
1773  * @worker: worker which is entering idle state
1774  *
1775  * @worker is entering idle state.  Update stats and idle timer if
1776  * necessary.
1777  *
1778  * LOCKING:
1779  * raw_spin_lock_irq(pool->lock).
1780  */
1781 static void worker_enter_idle(struct worker *worker)
1782 {
1783 	struct worker_pool *pool = worker->pool;
1784 
1785 	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1786 	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
1787 			 (worker->hentry.next || worker->hentry.pprev)))
1788 		return;
1789 
1790 	/* can't use worker_set_flags(), also called from create_worker() */
1791 	worker->flags |= WORKER_IDLE;
1792 	pool->nr_idle++;
1793 	worker->last_active = jiffies;
1794 
1795 	/* idle_list is LIFO */
1796 	list_add(&worker->entry, &pool->idle_list);
1797 
1798 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1799 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1800 
1801 	/*
1802 	 * Sanity check nr_running.  Because unbind_workers() releases
1803 	 * pool->lock between setting %WORKER_UNBOUND and zapping
1804 	 * nr_running, the warning may trigger spuriously.  Check iff
1805 	 * unbind is not in progress.
1806 	 */
1807 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1808 		     pool->nr_workers == pool->nr_idle &&
1809 		     atomic_read(&pool->nr_running));
1810 }
1811 
1812 /**
1813  * worker_leave_idle - leave idle state
1814  * @worker: worker which is leaving idle state
1815  *
1816  * @worker is leaving idle state.  Update stats.
1817  *
1818  * LOCKING:
1819  * raw_spin_lock_irq(pool->lock).
1820  */
1821 static void worker_leave_idle(struct worker *worker)
1822 {
1823 	struct worker_pool *pool = worker->pool;
1824 
1825 	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1826 		return;
1827 	worker_clr_flags(worker, WORKER_IDLE);
1828 	pool->nr_idle--;
1829 	list_del_init(&worker->entry);
1830 }
1831 
1832 static struct worker *alloc_worker(int node)
1833 {
1834 	struct worker *worker;
1835 
1836 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1837 	if (worker) {
1838 		INIT_LIST_HEAD(&worker->entry);
1839 		INIT_LIST_HEAD(&worker->scheduled);
1840 		INIT_LIST_HEAD(&worker->node);
1841 		/* on creation a worker is in !idle && prep state */
1842 		worker->flags = WORKER_PREP;
1843 	}
1844 	return worker;
1845 }
1846 
1847 /**
1848  * worker_attach_to_pool() - attach a worker to a pool
1849  * @worker: worker to be attached
1850  * @pool: the target pool
1851  *
1852  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1853  * cpu-binding of @worker are kept coordinated with the pool across
1854  * cpu-[un]hotplugs.
1855  */
1856 static void worker_attach_to_pool(struct worker *worker,
1857 				   struct worker_pool *pool)
1858 {
1859 	mutex_lock(&wq_pool_attach_mutex);
1860 
1861 	/*
1862 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1863 	 * stable across this function.  See the comments above the flag
1864 	 * definition for details.
1865 	 */
1866 	if (pool->flags & POOL_DISASSOCIATED)
1867 		worker->flags |= WORKER_UNBOUND;
1868 
1869 	if (worker->rescue_wq)
1870 		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1871 
1872 	list_add_tail(&worker->node, &pool->workers);
1873 	worker->pool = pool;
1874 
1875 	mutex_unlock(&wq_pool_attach_mutex);
1876 }
1877 
1878 /**
1879  * worker_detach_from_pool() - detach a worker from its pool
1880  * @worker: worker which is attached to its pool
1881  *
1882  * Undo the attaching which had been done in worker_attach_to_pool().  The
1883  * caller worker shouldn't access the pool after it is detached unless it
1884  * holds another reference to the pool.
1885  */
1886 static void worker_detach_from_pool(struct worker *worker)
1887 {
1888 	struct worker_pool *pool = worker->pool;
1889 	struct completion *detach_completion = NULL;
1890 
1891 	mutex_lock(&wq_pool_attach_mutex);
1892 
1893 	list_del(&worker->node);
1894 	worker->pool = NULL;
1895 
1896 	if (list_empty(&pool->workers))
1897 		detach_completion = pool->detach_completion;
1898 	mutex_unlock(&wq_pool_attach_mutex);
1899 
1900 	/* clear leftover flags without pool->lock after it is detached */
1901 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1902 
1903 	if (detach_completion)
1904 		complete(detach_completion);
1905 }
1906 
1907 /**
1908  * create_worker - create a new workqueue worker
1909  * @pool: pool the new worker will belong to
1910  *
1911  * Create and start a new worker which is attached to @pool.
1912  *
1913  * CONTEXT:
1914  * Might sleep.  Does GFP_KERNEL allocations.
1915  *
1916  * Return:
1917  * Pointer to the newly created worker.
1918  */
1919 static struct worker *create_worker(struct worker_pool *pool)
1920 {
1921 	struct worker *worker = NULL;
1922 	int id = -1;
1923 	char id_buf[16];
1924 
1925 	/* ID is needed to determine kthread name */
1926 	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1927 	if (id < 0)
1928 		goto fail;
1929 
1930 	worker = alloc_worker(pool->node);
1931 	if (!worker)
1932 		goto fail;
1933 
1934 	worker->id = id;
1935 
1936 	if (pool->cpu >= 0)
1937 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1938 			 pool->attrs->nice < 0  ? "H" : "");
1939 	else
1940 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1941 
1942 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1943 					      "kworker/%s", id_buf);
1944 	if (IS_ERR(worker->task))
1945 		goto fail;
1946 
1947 	set_user_nice(worker->task, pool->attrs->nice);
1948 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
1949 
1950 	/* successful, attach the worker to the pool */
1951 	worker_attach_to_pool(worker, pool);
1952 
1953 	/* start the newly created worker */
1954 	raw_spin_lock_irq(&pool->lock);
1955 	worker->pool->nr_workers++;
1956 	worker_enter_idle(worker);
1957 	wake_up_process(worker->task);
1958 	raw_spin_unlock_irq(&pool->lock);
1959 
1960 	return worker;
1961 
1962 fail:
1963 	if (id >= 0)
1964 		ida_simple_remove(&pool->worker_ida, id);
1965 	kfree(worker);
1966 	return NULL;
1967 }
1968 
1969 /**
1970  * destroy_worker - destroy a workqueue worker
1971  * @worker: worker to be destroyed
1972  *
1973  * Destroy @worker and adjust @pool stats accordingly.  The worker should
1974  * be idle.
1975  *
1976  * CONTEXT:
1977  * raw_spin_lock_irq(pool->lock).
1978  */
1979 static void destroy_worker(struct worker *worker)
1980 {
1981 	struct worker_pool *pool = worker->pool;
1982 
1983 	lockdep_assert_held(&pool->lock);
1984 
1985 	/* sanity check frenzy */
1986 	if (WARN_ON(worker->current_work) ||
1987 	    WARN_ON(!list_empty(&worker->scheduled)) ||
1988 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
1989 		return;
1990 
1991 	pool->nr_workers--;
1992 	pool->nr_idle--;
1993 
1994 	list_del_init(&worker->entry);
1995 	worker->flags |= WORKER_DIE;
1996 	wake_up_process(worker->task);
1997 }
1998 
1999 static void idle_worker_timeout(struct timer_list *t)
2000 {
2001 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
2002 
2003 	raw_spin_lock_irq(&pool->lock);
2004 
2005 	while (too_many_workers(pool)) {
2006 		struct worker *worker;
2007 		unsigned long expires;
2008 
2009 		/* idle_list is kept in LIFO order, check the last one */
2010 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2011 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2012 
2013 		if (time_before(jiffies, expires)) {
2014 			mod_timer(&pool->idle_timer, expires);
2015 			break;
2016 		}
2017 
2018 		destroy_worker(worker);
2019 	}
2020 
2021 	raw_spin_unlock_irq(&pool->lock);
2022 }
2023 
2024 static void send_mayday(struct work_struct *work)
2025 {
2026 	struct pool_workqueue *pwq = get_work_pwq(work);
2027 	struct workqueue_struct *wq = pwq->wq;
2028 
2029 	lockdep_assert_held(&wq_mayday_lock);
2030 
2031 	if (!wq->rescuer)
2032 		return;
2033 
2034 	/* mayday mayday mayday */
2035 	if (list_empty(&pwq->mayday_node)) {
2036 		/*
2037 		 * If @pwq is for an unbound wq, its base ref may be put at
2038 		 * any time due to an attribute change.  Pin @pwq until the
2039 		 * rescuer is done with it.
2040 		 */
2041 		get_pwq(pwq);
2042 		list_add_tail(&pwq->mayday_node, &wq->maydays);
2043 		wake_up_process(wq->rescuer->task);
2044 	}
2045 }
2046 
2047 static void pool_mayday_timeout(struct timer_list *t)
2048 {
2049 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2050 	struct work_struct *work;
2051 
2052 	raw_spin_lock_irq(&pool->lock);
2053 	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
2054 
2055 	if (need_to_create_worker(pool)) {
2056 		/*
2057 		 * We've been trying to create a new worker but
2058 		 * haven't been successful.  We might be hitting an
2059 		 * allocation deadlock.  Send distress signals to
2060 		 * rescuers.
2061 		 */
2062 		list_for_each_entry(work, &pool->worklist, entry)
2063 			send_mayday(work);
2064 	}
2065 
2066 	raw_spin_unlock(&wq_mayday_lock);
2067 	raw_spin_unlock_irq(&pool->lock);
2068 
2069 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2070 }
2071 
2072 /**
2073  * maybe_create_worker - create a new worker if necessary
2074  * @pool: pool to create a new worker for
2075  *
2076  * Create a new worker for @pool if necessary.  @pool is guaranteed to
2077  * have at least one idle worker on return from this function.  If
2078  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2079  * sent to all rescuers with works scheduled on @pool to resolve
2080  * possible allocation deadlock.
2081  *
2082  * On return, need_to_create_worker() is guaranteed to be %false and
2083  * may_start_working() %true.
2084  *
2085  * LOCKING:
2086  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2087  * multiple times.  Does GFP_KERNEL allocations.  Called only from
2088  * manager.
2089  */
2090 static void maybe_create_worker(struct worker_pool *pool)
2091 __releases(&pool->lock)
2092 __acquires(&pool->lock)
2093 {
2094 restart:
2095 	raw_spin_unlock_irq(&pool->lock);
2096 
2097 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2098 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2099 
2100 	while (true) {
2101 		if (create_worker(pool) || !need_to_create_worker(pool))
2102 			break;
2103 
2104 		schedule_timeout_interruptible(CREATE_COOLDOWN);
2105 
2106 		if (!need_to_create_worker(pool))
2107 			break;
2108 	}
2109 
2110 	del_timer_sync(&pool->mayday_timer);
2111 	raw_spin_lock_irq(&pool->lock);
2112 	/*
2113 	 * This is necessary even after a new worker was just successfully
2114 	 * created as @pool->lock was dropped and the new worker might have
2115 	 * already become busy.
2116 	 */
2117 	if (need_to_create_worker(pool))
2118 		goto restart;
2119 }
2120 
2121 /**
2122  * manage_workers - manage worker pool
2123  * @worker: self
2124  *
2125  * Assume the manager role and manage the worker pool @worker belongs
2126  * to.  At any given time, there can be only zero or one manager per
2127  * pool.  The exclusion is handled automatically by this function.
2128  *
2129  * The caller can safely start processing works on false return.  On
2130  * true return, it's guaranteed that need_to_create_worker() is false
2131  * and may_start_working() is true.
2132  *
2133  * CONTEXT:
2134  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2135  * multiple times.  Does GFP_KERNEL allocations.
2136  *
2137  * Return:
2138  * %false if the pool doesn't need management and the caller can safely
2139  * start processing works, %true if management function was performed and
2140  * the conditions that the caller verified before calling the function may
2141  * no longer be true.
2142  */
2143 static bool manage_workers(struct worker *worker)
2144 {
2145 	struct worker_pool *pool = worker->pool;
2146 
2147 	if (pool->flags & POOL_MANAGER_ACTIVE)
2148 		return false;
2149 
2150 	pool->flags |= POOL_MANAGER_ACTIVE;
2151 	pool->manager = worker;
2152 
2153 	maybe_create_worker(pool);
2154 
2155 	pool->manager = NULL;
2156 	pool->flags &= ~POOL_MANAGER_ACTIVE;
2157 	rcuwait_wake_up(&manager_wait);
2158 	return true;
2159 }
2160 
2161 /**
2162  * process_one_work - process single work
2163  * @worker: self
2164  * @work: work to process
2165  *
2166  * Process @work.  This function contains all the logic necessary to
2167  * process a single work item including synchronization against and
2168  * interaction with other workers on the same cpu, queueing and
2169  * flushing.  As long as the context requirements are met, any worker
2170  * can call this function to process a work item.
2171  *
2172  * CONTEXT:
2173  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2174  */
2175 static void process_one_work(struct worker *worker, struct work_struct *work)
2176 __releases(&pool->lock)
2177 __acquires(&pool->lock)
2178 {
2179 	struct pool_workqueue *pwq = get_work_pwq(work);
2180 	struct worker_pool *pool = worker->pool;
2181 	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2182 	int work_color;
2183 	struct worker *collision;
2184 #ifdef CONFIG_LOCKDEP
2185 	/*
2186 	 * It is permissible to free the struct work_struct from
2187 	 * inside the function that is called from it; we need to take
2188 	 * this into account for lockdep too.  To avoid bogus "held
2189 	 * lock freed" warnings as well as problems when looking into
2190 	 * work->lockdep_map, make a copy and use that here.
2191 	 */
2192 	struct lockdep_map lockdep_map;
2193 
2194 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2195 #endif
2196 	/* ensure we're on the correct CPU */
2197 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2198 		     raw_smp_processor_id() != pool->cpu);
2199 
2200 	/*
2201 	 * A single work shouldn't be executed concurrently by
2202 	 * multiple workers on a single cpu.  Check whether anyone is
2203 	 * already processing the work.  If so, defer the work to the
2204 	 * currently executing one.
2205 	 */
2206 	collision = find_worker_executing_work(pool, work);
2207 	if (unlikely(collision)) {
2208 		move_linked_works(work, &collision->scheduled, NULL);
2209 		return;
2210 	}
2211 
2212 	/* claim and dequeue */
2213 	debug_work_deactivate(work);
2214 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2215 	worker->current_work = work;
2216 	worker->current_func = work->func;
2217 	worker->current_pwq = pwq;
2218 	work_color = get_work_color(work);
2219 
2220 	/*
2221 	 * Record wq name for cmdline and debug reporting, may get
2222 	 * overridden through set_worker_desc().
2223 	 */
2224 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2225 
2226 	list_del_init(&work->entry);
2227 
2228 	/*
2229 	 * CPU intensive works don't participate in concurrency management.
2230 	 * They're the scheduler's responsibility.  This takes @worker out
2231 	 * of concurrency management and the next code block will chain
2232 	 * execution of the pending work items.
2233 	 */
2234 	if (unlikely(cpu_intensive))
2235 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2236 
2237 	/*
2238 	 * Wake up another worker if necessary.  The condition is always
2239 	 * false for normal per-cpu workers since nr_running would always
2240 	 * be >= 1 at this point.  This is used to chain execution of the
2241 	 * pending work items for WORKER_NOT_RUNNING workers such as the
2242 	 * UNBOUND and CPU_INTENSIVE ones.
2243 	 */
2244 	if (need_more_worker(pool))
2245 		wake_up_worker(pool);
2246 
2247 	/*
2248 	 * Record the last pool and clear PENDING which should be the last
2249 	 * update to @work.  Also, do this inside @pool->lock so that
2250 	 * PENDING and queued state changes happen together while IRQ is
2251 	 * disabled.
2252 	 */
2253 	set_work_pool_and_clear_pending(work, pool->id);
2254 
2255 	raw_spin_unlock_irq(&pool->lock);
2256 
2257 	lock_map_acquire(&pwq->wq->lockdep_map);
2258 	lock_map_acquire(&lockdep_map);
2259 	/*
2260 	 * Strictly speaking we should mark the invariant state without holding
2261 	 * any locks, that is, before these two lock_map_acquire()'s.
2262 	 *
2263 	 * However, that would result in:
2264 	 *
2265 	 *   A(W1)
2266 	 *   WFC(C)
2267 	 *		A(W1)
2268 	 *		C(C)
2269 	 *
2270 	 * Which would create W1->C->W1 dependencies, even though there is no
2271 	 * actual deadlock possible. There are two solutions, using a
2272 	 * read-recursive acquire on the work(queue) 'locks', but this will then
2273 	 * hit the lockdep limitation on recursive locks, or simply discard
2274 	 * these locks.
2275 	 *
2276 	 * AFAICT there is no possible deadlock scenario between the
2277 	 * flush_work() and complete() primitives (except for single-threaded
2278 	 * workqueues), so hiding them isn't a problem.
2279 	 */
2280 	lockdep_invariant_state(true);
2281 	trace_workqueue_execute_start(work);
2282 	worker->current_func(work);
2283 	/*
2284 	 * While we must be careful to not use "work" after this, the trace
2285 	 * point will only record its address.
2286 	 */
2287 	trace_workqueue_execute_end(work, worker->current_func);
2288 	lock_map_release(&lockdep_map);
2289 	lock_map_release(&pwq->wq->lockdep_map);
2290 
2291 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2292 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2293 		       "     last function: %ps\n",
2294 		       current->comm, preempt_count(), task_pid_nr(current),
2295 		       worker->current_func);
2296 		debug_show_held_locks(current);
2297 		dump_stack();
2298 	}
2299 
2300 	/*
2301 	 * The following prevents a kworker from hogging CPU on !PREEMPTION
2302 	 * kernels, where a requeueing work item waiting for something to
2303 	 * happen could deadlock with stop_machine as such work item could
2304 	 * indefinitely requeue itself while all other CPUs are trapped in
2305 	 * stop_machine. At the same time, report a quiescent RCU state so
2306 	 * the same condition doesn't freeze RCU.
2307 	 */
2308 	cond_resched();
2309 
2310 	raw_spin_lock_irq(&pool->lock);
2311 
2312 	/* clear cpu intensive status */
2313 	if (unlikely(cpu_intensive))
2314 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2315 
2316 	/* tag the worker for identification in schedule() */
2317 	worker->last_func = worker->current_func;
2318 
2319 	/* we're done with it, release */
2320 	hash_del(&worker->hentry);
2321 	worker->current_work = NULL;
2322 	worker->current_func = NULL;
2323 	worker->current_pwq = NULL;
2324 	pwq_dec_nr_in_flight(pwq, work_color);
2325 }
2326 
2327 /**
2328  * process_scheduled_works - process scheduled works
2329  * @worker: self
2330  *
2331  * Process all scheduled works.  Please note that the scheduled list
2332  * may change while processing a work, so this function repeatedly
2333  * fetches a work from the top and executes it.
2334  *
2335  * CONTEXT:
2336  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2337  * multiple times.
2338  */
2339 static void process_scheduled_works(struct worker *worker)
2340 {
2341 	while (!list_empty(&worker->scheduled)) {
2342 		struct work_struct *work = list_first_entry(&worker->scheduled,
2343 						struct work_struct, entry);
2344 		process_one_work(worker, work);
2345 	}
2346 }
2347 
2348 static void set_pf_worker(bool val)
2349 {
2350 	mutex_lock(&wq_pool_attach_mutex);
2351 	if (val)
2352 		current->flags |= PF_WQ_WORKER;
2353 	else
2354 		current->flags &= ~PF_WQ_WORKER;
2355 	mutex_unlock(&wq_pool_attach_mutex);
2356 }
2357 
2358 /**
2359  * worker_thread - the worker thread function
2360  * @__worker: self
2361  *
2362  * The worker thread function.  All workers belong to a worker_pool -
2363  * either a per-cpu one or dynamic unbound one.  These workers process all
2364  * work items regardless of their specific target workqueue.  The only
2365  * exception is work items which belong to workqueues with a rescuer which
2366  * will be explained in rescuer_thread().
2367  *
2368  * Return: 0
2369  */
2370 static int worker_thread(void *__worker)
2371 {
2372 	struct worker *worker = __worker;
2373 	struct worker_pool *pool = worker->pool;
2374 
2375 	/* tell the scheduler that this is a workqueue worker */
2376 	set_pf_worker(true);
2377 woke_up:
2378 	raw_spin_lock_irq(&pool->lock);
2379 
2380 	/* am I supposed to die? */
2381 	if (unlikely(worker->flags & WORKER_DIE)) {
2382 		raw_spin_unlock_irq(&pool->lock);
2383 		WARN_ON_ONCE(!list_empty(&worker->entry));
2384 		set_pf_worker(false);
2385 
2386 		set_task_comm(worker->task, "kworker/dying");
2387 		ida_simple_remove(&pool->worker_ida, worker->id);
2388 		worker_detach_from_pool(worker);
2389 		kfree(worker);
2390 		return 0;
2391 	}
2392 
2393 	worker_leave_idle(worker);
2394 recheck:
2395 	/* no more worker necessary? */
2396 	if (!need_more_worker(pool))
2397 		goto sleep;
2398 
2399 	/* do we need to manage? */
2400 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2401 		goto recheck;
2402 
2403 	/*
2404 	 * ->scheduled list can only be filled while a worker is
2405 	 * preparing to process a work or actually processing it.
2406 	 * Make sure nobody diddled with it while I was sleeping.
2407 	 */
2408 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
2409 
2410 	/*
2411 	 * Finish PREP stage.  We're guaranteed to have at least one idle
2412 	 * worker or that someone else has already assumed the manager
2413 	 * role.  This is where @worker starts participating in concurrency
2414 	 * management if applicable and concurrency management is restored
2415 	 * after being rebound.  See rebind_workers() for details.
2416 	 */
2417 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2418 
2419 	do {
2420 		struct work_struct *work =
2421 			list_first_entry(&pool->worklist,
2422 					 struct work_struct, entry);
2423 
2424 		pool->watchdog_ts = jiffies;
2425 
2426 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2427 			/* optimization path, not strictly necessary */
2428 			process_one_work(worker, work);
2429 			if (unlikely(!list_empty(&worker->scheduled)))
2430 				process_scheduled_works(worker);
2431 		} else {
2432 			move_linked_works(work, &worker->scheduled, NULL);
2433 			process_scheduled_works(worker);
2434 		}
2435 	} while (keep_working(pool));
2436 
2437 	worker_set_flags(worker, WORKER_PREP);
2438 sleep:
2439 	/*
2440 	 * pool->lock is held and there's no work to process and no need to
2441 	 * manage, sleep.  Workers are woken up only while holding
2442 	 * pool->lock or from local cpu, so setting the current state
2443 	 * before releasing pool->lock is enough to prevent losing any
2444 	 * event.
2445 	 */
2446 	worker_enter_idle(worker);
2447 	__set_current_state(TASK_IDLE);
2448 	raw_spin_unlock_irq(&pool->lock);
2449 	schedule();
2450 	goto woke_up;
2451 }
2452 
2453 /**
2454  * rescuer_thread - the rescuer thread function
2455  * @__rescuer: self
2456  *
2457  * Workqueue rescuer thread function.  There's one rescuer for each
2458  * workqueue which has WQ_MEM_RECLAIM set.
2459  *
2460  * Regular work processing on a pool may block trying to create a new
2461  * worker, which uses a GFP_KERNEL allocation and therefore has a slight
2462  * chance of developing into a deadlock if some works currently on the
2463  * same queue need to be processed to satisfy the GFP_KERNEL allocation.
2464  * This is the problem the rescuer solves.
2465  *
2466  * When such a condition is possible, the pool summons the rescuers of
2467  * all workqueues which have works queued on the pool and lets them
2468  * process those works so that forward progress can be guaranteed.
2469  *
2470  * This should happen rarely.
2471  *
2472  * Return: 0
2473  */
2474 static int rescuer_thread(void *__rescuer)
2475 {
2476 	struct worker *rescuer = __rescuer;
2477 	struct workqueue_struct *wq = rescuer->rescue_wq;
2478 	struct list_head *scheduled = &rescuer->scheduled;
2479 	bool should_stop;
2480 
2481 	set_user_nice(current, RESCUER_NICE_LEVEL);
2482 
2483 	/*
2484 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2485 	 * doesn't participate in concurrency management.
2486 	 */
2487 	set_pf_worker(true);
2488 repeat:
2489 	set_current_state(TASK_IDLE);
2490 
2491 	/*
2492 	 * By the time the rescuer is requested to stop, the workqueue
2493 	 * shouldn't have any work pending, but @wq->maydays may still have
2494 	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
2495 	 * all the work items before the rescuer got to them.  Go through
2496 	 * @wq->maydays processing before acting on should_stop so that the
2497 	 * list is always empty on exit.
2498 	 */
2499 	should_stop = kthread_should_stop();
2500 
2501 	/* see whether any pwq is asking for help */
2502 	raw_spin_lock_irq(&wq_mayday_lock);
2503 
2504 	while (!list_empty(&wq->maydays)) {
2505 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2506 					struct pool_workqueue, mayday_node);
2507 		struct worker_pool *pool = pwq->pool;
2508 		struct work_struct *work, *n;
2509 		bool first = true;
2510 
2511 		__set_current_state(TASK_RUNNING);
2512 		list_del_init(&pwq->mayday_node);
2513 
2514 		raw_spin_unlock_irq(&wq_mayday_lock);
2515 
2516 		worker_attach_to_pool(rescuer, pool);
2517 
2518 		raw_spin_lock_irq(&pool->lock);
2519 
2520 		/*
2521 		 * Slurp in all works issued via this workqueue and
2522 		 * process'em.
2523 		 */
2524 		WARN_ON_ONCE(!list_empty(scheduled));
2525 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2526 			if (get_work_pwq(work) == pwq) {
2527 				if (first)
2528 					pool->watchdog_ts = jiffies;
2529 				move_linked_works(work, scheduled, &n);
2530 			}
2531 			first = false;
2532 		}
2533 
2534 		if (!list_empty(scheduled)) {
2535 			process_scheduled_works(rescuer);
2536 
2537 			/*
2538 			 * The above execution of rescued work items could
2539 			 * have created more to rescue through
2540 			 * pwq_activate_first_inactive() or chained
2541 			 * queueing.  Let's put @pwq back on mayday list so
2542 			 * that such back-to-back work items, which may be
2543 			 * being used to relieve memory pressure, don't
2544 			 * incur MAYDAY_INTERVAL delay in between.
2545 			 */
2546 			if (pwq->nr_active && need_to_create_worker(pool)) {
2547 				raw_spin_lock(&wq_mayday_lock);
2548 				/*
2549 				 * Queue iff we aren't racing destruction
2550 				 * and somebody else hasn't queued it already.
2551 				 */
2552 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2553 					get_pwq(pwq);
2554 					list_add_tail(&pwq->mayday_node, &wq->maydays);
2555 				}
2556 				raw_spin_unlock(&wq_mayday_lock);
2557 			}
2558 		}
2559 
2560 		/*
2561 		 * Put the reference grabbed by send_mayday().  @pool won't
2562 		 * go away while we're still attached to it.
2563 		 */
2564 		put_pwq(pwq);
2565 
2566 		/*
2567 		 * Leave this pool.  If need_more_worker() is %true, notify a
2568 		 * regular worker; otherwise, we end up with 0 concurrency
2569 		 * and stalling the execution.
2570 		 */
2571 		if (need_more_worker(pool))
2572 			wake_up_worker(pool);
2573 
2574 		raw_spin_unlock_irq(&pool->lock);
2575 
2576 		worker_detach_from_pool(rescuer);
2577 
2578 		raw_spin_lock_irq(&wq_mayday_lock);
2579 	}
2580 
2581 	raw_spin_unlock_irq(&wq_mayday_lock);
2582 
2583 	if (should_stop) {
2584 		__set_current_state(TASK_RUNNING);
2585 		set_pf_worker(false);
2586 		return 0;
2587 	}
2588 
2589 	/* rescuers should never participate in concurrency management */
2590 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2591 	schedule();
2592 	goto repeat;
2593 }
2594 
2595 /**
2596  * check_flush_dependency - check for flush dependency sanity
2597  * @target_wq: workqueue being flushed
2598  * @target_work: work item being flushed (NULL for workqueue flushes)
2599  *
2600  * %current is trying to flush the whole @target_wq or @target_work on it.
2601  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2602  * reclaiming memory or running on a workqueue which doesn't have
2603  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2604  * a deadlock.
2605  */
2606 static void check_flush_dependency(struct workqueue_struct *target_wq,
2607 				   struct work_struct *target_work)
2608 {
2609 	work_func_t target_func = target_work ? target_work->func : NULL;
2610 	struct worker *worker;
2611 
2612 	if (target_wq->flags & WQ_MEM_RECLAIM)
2613 		return;
2614 
2615 	worker = current_wq_worker();
2616 
2617 	WARN_ONCE(current->flags & PF_MEMALLOC,
2618 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2619 		  current->pid, current->comm, target_wq->name, target_func);
2620 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2621 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2622 		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2623 		  worker->current_pwq->wq->name, worker->current_func,
2624 		  target_wq->name, target_func);
2625 }
2626 
2627 struct wq_barrier {
2628 	struct work_struct	work;
2629 	struct completion	done;
2630 	struct task_struct	*task;	/* purely informational */
2631 };
2632 
2633 static void wq_barrier_func(struct work_struct *work)
2634 {
2635 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2636 	complete(&barr->done);
2637 }
2638 
2639 /**
2640  * insert_wq_barrier - insert a barrier work
2641  * @pwq: pwq to insert barrier into
2642  * @barr: wq_barrier to insert
2643  * @target: target work to attach @barr to
2644  * @worker: worker currently executing @target, NULL if @target is not executing
2645  *
2646  * @barr is linked to @target such that @barr is completed only after
2647  * @target finishes execution.  Please note that the ordering
2648  * guarantee is observed only with respect to @target and on the local
2649  * cpu.
2650  *
2651  * Currently, a queued barrier can't be canceled.  This is because
2652  * try_to_grab_pending() can't determine whether the work to be
2653  * grabbed is at the head of the queue and thus can't clear LINKED
2654  * flag of the previous work while there must be a valid next work
2655  * after a work with LINKED flag set.
2656  *
2657  * Note that when @worker is non-NULL, @target may be modified
2658  * underneath us, so we can't reliably determine pwq from @target.
2659  *
2660  * CONTEXT:
2661  * raw_spin_lock_irq(pool->lock).
2662  */
2663 static void insert_wq_barrier(struct pool_workqueue *pwq,
2664 			      struct wq_barrier *barr,
2665 			      struct work_struct *target, struct worker *worker)
2666 {
2667 	struct list_head *head;
2668 	unsigned int linked = 0;
2669 
2670 	/*
2671 	 * debugobject calls are safe here even with pool->lock locked
2672 	 * as we know for sure that this will not trigger any of the
2673 	 * checks and call back into the fixup functions where we
2674 	 * might deadlock.
2675 	 */
2676 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2677 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2678 
2679 	init_completion_map(&barr->done, &target->lockdep_map);
2680 
2681 	barr->task = current;
2682 
2683 	/*
2684 	 * If @target is currently being executed, schedule the
2685 	 * barrier to the worker; otherwise, put it after @target.
2686 	 */
2687 	if (worker)
2688 		head = worker->scheduled.next;
2689 	else {
2690 		unsigned long *bits = work_data_bits(target);
2691 
2692 		head = target->entry.next;
2693 		/* there can already be other linked works, inherit and set */
2694 		linked = *bits & WORK_STRUCT_LINKED;
2695 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2696 	}
2697 
2698 	debug_work_activate(&barr->work);
2699 	insert_work(pwq, &barr->work, head,
2700 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2701 }
2702 
2703 /**
2704  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2705  * @wq: workqueue being flushed
2706  * @flush_color: new flush color, < 0 for no-op
2707  * @work_color: new work color, < 0 for no-op
2708  *
2709  * Prepare pwqs for workqueue flushing.
2710  *
2711  * If @flush_color is non-negative, flush_color on all pwqs should be
2712  * -1.  If no pwq has in-flight commands at the specified color, all
2713  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2714  * has in flight commands, its pwq->flush_color is set to
2715  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2716  * wakeup logic is armed and %true is returned.
2717  *
2718  * The caller should have initialized @wq->first_flusher prior to
2719  * calling this function with non-negative @flush_color.  If
2720  * @flush_color is negative, no flush color update is done and %false
2721  * is returned.
2722  *
2723  * If @work_color is non-negative, all pwqs should have the same
2724  * work_color which is previous to @work_color and all will be
2725  * advanced to @work_color.
2726  *
2727  * CONTEXT:
2728  * mutex_lock(wq->mutex).
2729  *
2730  * Return:
2731  * %true if @flush_color >= 0 and there's something to flush.  %false
2732  * otherwise.
2733  */
2734 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2735 				      int flush_color, int work_color)
2736 {
2737 	bool wait = false;
2738 	struct pool_workqueue *pwq;
2739 
2740 	if (flush_color >= 0) {
2741 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2742 		atomic_set(&wq->nr_pwqs_to_flush, 1);
2743 	}
2744 
2745 	for_each_pwq(pwq, wq) {
2746 		struct worker_pool *pool = pwq->pool;
2747 
2748 		raw_spin_lock_irq(&pool->lock);
2749 
2750 		if (flush_color >= 0) {
2751 			WARN_ON_ONCE(pwq->flush_color != -1);
2752 
2753 			if (pwq->nr_in_flight[flush_color]) {
2754 				pwq->flush_color = flush_color;
2755 				atomic_inc(&wq->nr_pwqs_to_flush);
2756 				wait = true;
2757 			}
2758 		}
2759 
2760 		if (work_color >= 0) {
2761 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2762 			pwq->work_color = work_color;
2763 		}
2764 
2765 		raw_spin_unlock_irq(&pool->lock);
2766 	}
2767 
2768 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2769 		complete(&wq->first_flusher->done);
2770 
2771 	return wait;
2772 }
2773 
2774 /**
2775  * flush_workqueue - ensure that any scheduled work has run to completion.
2776  * @wq: workqueue to flush
2777  *
2778  * This function sleeps until all work items which were queued on entry
2779  * have finished execution, but it is not livelocked by new incoming ones.
2780  */
2781 void flush_workqueue(struct workqueue_struct *wq)
2782 {
2783 	struct wq_flusher this_flusher = {
2784 		.list = LIST_HEAD_INIT(this_flusher.list),
2785 		.flush_color = -1,
2786 		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2787 	};
2788 	int next_color;
2789 
2790 	if (WARN_ON(!wq_online))
2791 		return;
2792 
2793 	lock_map_acquire(&wq->lockdep_map);
2794 	lock_map_release(&wq->lockdep_map);
2795 
2796 	mutex_lock(&wq->mutex);
2797 
2798 	/*
2799 	 * Start-to-wait phase
2800 	 */
2801 	next_color = work_next_color(wq->work_color);
2802 
2803 	if (next_color != wq->flush_color) {
2804 		/*
2805 		 * Color space is not full.  The current work_color
2806 		 * becomes our flush_color and work_color is advanced
2807 		 * by one.
2808 		 */
2809 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2810 		this_flusher.flush_color = wq->work_color;
2811 		wq->work_color = next_color;
2812 
2813 		if (!wq->first_flusher) {
2814 			/* no flush in progress, become the first flusher */
2815 			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2816 
2817 			wq->first_flusher = &this_flusher;
2818 
2819 			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2820 						       wq->work_color)) {
2821 				/* nothing to flush, done */
2822 				wq->flush_color = next_color;
2823 				wq->first_flusher = NULL;
2824 				goto out_unlock;
2825 			}
2826 		} else {
2827 			/* wait in queue */
2828 			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2829 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2830 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2831 		}
2832 	} else {
2833 		/*
2834 		 * Oops, color space is full, wait on overflow queue.
2835 		 * The next flush completion will assign us
2836 		 * flush_color and transfer to flusher_queue.
2837 		 */
2838 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2839 	}
2840 
2841 	check_flush_dependency(wq, NULL);
2842 
2843 	mutex_unlock(&wq->mutex);
2844 
2845 	wait_for_completion(&this_flusher.done);
2846 
2847 	/*
2848 	 * Wake-up-and-cascade phase
2849 	 *
2850 	 * First flushers are responsible for cascading flushes and
2851 	 * handling overflow.  Non-first flushers can simply return.
2852 	 */
2853 	if (READ_ONCE(wq->first_flusher) != &this_flusher)
2854 		return;
2855 
2856 	mutex_lock(&wq->mutex);
2857 
2858 	/* we might have raced, check again with mutex held */
2859 	if (wq->first_flusher != &this_flusher)
2860 		goto out_unlock;
2861 
2862 	WRITE_ONCE(wq->first_flusher, NULL);
2863 
2864 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
2865 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2866 
2867 	while (true) {
2868 		struct wq_flusher *next, *tmp;
2869 
2870 		/* complete all the flushers sharing the current flush color */
2871 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2872 			if (next->flush_color != wq->flush_color)
2873 				break;
2874 			list_del_init(&next->list);
2875 			complete(&next->done);
2876 		}
2877 
2878 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2879 			     wq->flush_color != work_next_color(wq->work_color));
2880 
2881 		/* this flush_color is finished, advance by one */
2882 		wq->flush_color = work_next_color(wq->flush_color);
2883 
2884 		/* one color has been freed, handle overflow queue */
2885 		if (!list_empty(&wq->flusher_overflow)) {
2886 			/*
2887 			 * Assign the same color to all overflowed
2888 			 * flushers, advance work_color and append to
2889 			 * flusher_queue.  This is the start-to-wait
2890 			 * phase for these overflowed flushers.
2891 			 */
2892 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2893 				tmp->flush_color = wq->work_color;
2894 
2895 			wq->work_color = work_next_color(wq->work_color);
2896 
2897 			list_splice_tail_init(&wq->flusher_overflow,
2898 					      &wq->flusher_queue);
2899 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2900 		}
2901 
2902 		if (list_empty(&wq->flusher_queue)) {
2903 			WARN_ON_ONCE(wq->flush_color != wq->work_color);
2904 			break;
2905 		}
2906 
2907 		/*
2908 		 * Need to flush more colors.  Make the next flusher
2909 		 * the new first flusher and arm pwqs.
2910 		 */
2911 		WARN_ON_ONCE(wq->flush_color == wq->work_color);
2912 		WARN_ON_ONCE(wq->flush_color != next->flush_color);
2913 
2914 		list_del_init(&next->list);
2915 		wq->first_flusher = next;
2916 
2917 		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2918 			break;
2919 
2920 		/*
2921 		 * Meh... this color is already done, clear first
2922 		 * flusher and repeat cascading.
2923 		 */
2924 		wq->first_flusher = NULL;
2925 	}
2926 
2927 out_unlock:
2928 	mutex_unlock(&wq->mutex);
2929 }
2930 EXPORT_SYMBOL(flush_workqueue);
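
/*
 * Usage sketch (illustrative, not part of this file): waiting for
 * everything already queued on a hypothetical my_wq before touching the
 * shared state those items use.
 *
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);
 *
 * On return, my_work and anything else queued before the call has
 * finished; items queued afterwards are not waited for.
 */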
2931 
2932 /**
2933  * drain_workqueue - drain a workqueue
2934  * @wq: workqueue to drain
2935  *
2936  * Wait until the workqueue becomes empty.  While draining is in progress,
2937  * only chain queueing is allowed.  IOW, only currently pending or running
2938  * work items on @wq can queue further work items on it.  @wq is flushed
2939  * repeatedly until it becomes empty.  The number of flushing is determined
2940  * by the depth of chaining and should be relatively short.  Whine if it
2941  * takes too long.
2942  */
2943 void drain_workqueue(struct workqueue_struct *wq)
2944 {
2945 	unsigned int flush_cnt = 0;
2946 	struct pool_workqueue *pwq;
2947 
2948 	/*
2949 	 * __queue_work() needs to test whether there are drainers; it is much
2950 	 * hotter than drain_workqueue() and already looks at @wq->flags.
2951 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2952 	 */
2953 	mutex_lock(&wq->mutex);
2954 	if (!wq->nr_drainers++)
2955 		wq->flags |= __WQ_DRAINING;
2956 	mutex_unlock(&wq->mutex);
2957 reflush:
2958 	flush_workqueue(wq);
2959 
2960 	mutex_lock(&wq->mutex);
2961 
2962 	for_each_pwq(pwq, wq) {
2963 		bool drained;
2964 
2965 		raw_spin_lock_irq(&pwq->pool->lock);
2966 		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
2967 		raw_spin_unlock_irq(&pwq->pool->lock);
2968 
2969 		if (drained)
2970 			continue;
2971 
2972 		if (++flush_cnt == 10 ||
2973 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2974 			pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2975 				wq->name, flush_cnt);
2976 
2977 		mutex_unlock(&wq->mutex);
2978 		goto reflush;
2979 	}
2980 
2981 	if (!--wq->nr_drainers)
2982 		wq->flags &= ~__WQ_DRAINING;
2983 	mutex_unlock(&wq->mutex);
2984 }
2985 EXPORT_SYMBOL_GPL(drain_workqueue);
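
/*
 * Usage sketch (illustrative, not part of this file): draining a
 * hypothetical my_wq whose work items may re-queue themselves.
 *
 *	drain_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 *
 * destroy_workqueue() already drains internally, so an explicit call is
 * only needed when the queue must be empty but kept around.
 */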
2986 
2987 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2988 			     bool from_cancel)
2989 {
2990 	struct worker *worker = NULL;
2991 	struct worker_pool *pool;
2992 	struct pool_workqueue *pwq;
2993 
2994 	might_sleep();
2995 
2996 	rcu_read_lock();
2997 	pool = get_work_pool(work);
2998 	if (!pool) {
2999 		rcu_read_unlock();
3000 		return false;
3001 	}
3002 
3003 	raw_spin_lock_irq(&pool->lock);
3004 	/* see the comment in try_to_grab_pending() with the same code */
3005 	pwq = get_work_pwq(work);
3006 	if (pwq) {
3007 		if (unlikely(pwq->pool != pool))
3008 			goto already_gone;
3009 	} else {
3010 		worker = find_worker_executing_work(pool, work);
3011 		if (!worker)
3012 			goto already_gone;
3013 		pwq = worker->current_pwq;
3014 	}
3015 
3016 	check_flush_dependency(pwq->wq, work);
3017 
3018 	insert_wq_barrier(pwq, barr, work, worker);
3019 	raw_spin_unlock_irq(&pool->lock);
3020 
3021 	/*
3022 	 * Force a lock recursion deadlock when using flush_work() inside a
3023 	 * single-threaded or rescuer equipped workqueue.
3024 	 *
3025 	 * For single threaded workqueues the deadlock happens when the work
3026 	 * is after the work issuing the flush_work(). For rescuer equipped
3027 	 * workqueues the deadlock happens when the rescuer stalls, blocking
3028 	 * forward progress.
3029 	 */
3030 	if (!from_cancel &&
3031 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3032 		lock_map_acquire(&pwq->wq->lockdep_map);
3033 		lock_map_release(&pwq->wq->lockdep_map);
3034 	}
3035 	rcu_read_unlock();
3036 	return true;
3037 already_gone:
3038 	raw_spin_unlock_irq(&pool->lock);
3039 	rcu_read_unlock();
3040 	return false;
3041 }
3042 
3043 static bool __flush_work(struct work_struct *work, bool from_cancel)
3044 {
3045 	struct wq_barrier barr;
3046 
3047 	if (WARN_ON(!wq_online))
3048 		return false;
3049 
3050 	if (WARN_ON(!work->func))
3051 		return false;
3052 
3053 	lock_map_acquire(&work->lockdep_map);
3054 	lock_map_release(&work->lockdep_map);
3055 
3056 	if (start_flush_work(work, &barr, from_cancel)) {
3057 		wait_for_completion(&barr.done);
3058 		destroy_work_on_stack(&barr.work);
3059 		return true;
3060 	} else {
3061 		return false;
3062 	}
3063 }
3064 
3065 /**
3066  * flush_work - wait for a work to finish executing the last queueing instance
3067  * @work: the work to flush
3068  *
3069  * Wait until @work has finished execution.  @work is guaranteed to be idle
3070  * on return if it hasn't been requeued since flush started.
3071  *
3072  * Return:
3073  * %true if flush_work() waited for the work to finish execution,
3074  * %false if it was already idle.
3075  */
3076 bool flush_work(struct work_struct *work)
3077 {
3078 	return __flush_work(work, false);
3079 }
3080 EXPORT_SYMBOL_GPL(flush_work);
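
/*
 * Usage sketch (illustrative, not part of this file): making sure the last
 * queueing of a hypothetical my_work has finished before freeing the
 * hypothetical my_data that my_work_fn dereferences.
 *
 *	queue_work(system_wq, &my_work);
 *	flush_work(&my_work);
 *	kfree(my_data);
 *
 * This is only safe if nothing can re-queue my_work after the flush.
 */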
3081 
3082 struct cwt_wait {
3083 	wait_queue_entry_t		wait;
3084 	struct work_struct	*work;
3085 };
3086 
3087 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3088 {
3089 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3090 
3091 	if (cwait->work != key)
3092 		return 0;
3093 	return autoremove_wake_function(wait, mode, sync, key);
3094 }
3095 
3096 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3097 {
3098 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3099 	unsigned long flags;
3100 	int ret;
3101 
3102 	do {
3103 		ret = try_to_grab_pending(work, is_dwork, &flags);
3104 		/*
3105 		 * If someone else is already canceling, wait for it to
3106 		 * finish.  flush_work() doesn't work for PREEMPT_NONE
3107 		 * because we may get scheduled between @work's completion
3108 		 * and the other canceling task resuming and clearing
3109 		 * CANCELING - flush_work() will return false immediately
3110 		 * as @work is no longer busy, try_to_grab_pending() will
3111 		 * return -ENOENT as @work is still being canceled and the
3112 		 * other canceling task won't be able to clear CANCELING as
3113 		 * we're hogging the CPU.
3114 		 *
3115 		 * Let's wait for completion using a waitqueue.  As this
3116 		 * may lead to the thundering herd problem, use a custom
3117 		 * wake function which matches @work along with exclusive
3118 		 * wait and wakeup.
3119 		 */
3120 		if (unlikely(ret == -ENOENT)) {
3121 			struct cwt_wait cwait;
3122 
3123 			init_wait(&cwait.wait);
3124 			cwait.wait.func = cwt_wakefn;
3125 			cwait.work = work;
3126 
3127 			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3128 						  TASK_UNINTERRUPTIBLE);
3129 			if (work_is_canceling(work))
3130 				schedule();
3131 			finish_wait(&cancel_waitq, &cwait.wait);
3132 		}
3133 	} while (unlikely(ret < 0));
3134 
3135 	/* tell other tasks trying to grab @work to back off */
3136 	mark_work_canceling(work);
3137 	local_irq_restore(flags);
3138 
3139 	/*
3140 	 * This allows canceling during early boot.  We know that @work
3141 	 * isn't executing.
3142 	 */
3143 	if (wq_online)
3144 		__flush_work(work, true);
3145 
3146 	clear_work_data(work);
3147 
3148 	/*
3149 	 * Paired with prepare_to_wait() above so that either
3150 	 * waitqueue_active() is visible here or !work_is_canceling() is
3151 	 * visible there.
3152 	 */
3153 	smp_mb();
3154 	if (waitqueue_active(&cancel_waitq))
3155 		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3156 
3157 	return ret;
3158 }
3159 
3160 /**
3161  * cancel_work_sync - cancel a work and wait for it to finish
3162  * @work: the work to cancel
3163  *
3164  * Cancel @work and wait for its execution to finish.  This function
3165  * can be used even if the work re-queues itself or migrates to
3166  * another workqueue.  On return from this function, @work is
3167  * guaranteed to be not pending or executing on any CPU.
3168  *
3169  * cancel_work_sync(&delayed_work->work) must not be used for
3170  * delayed_work items.  Use cancel_delayed_work_sync() instead.
3171  *
3172  * The caller must ensure that the workqueue on which @work was last
3173  * queued can't be destroyed before this function returns.
3174  *
3175  * Return:
3176  * %true if @work was pending, %false otherwise.
3177  */
3178 bool cancel_work_sync(struct work_struct *work)
3179 {
3180 	return __cancel_work_timer(work, false);
3181 }
3182 EXPORT_SYMBOL_GPL(cancel_work_sync);
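
/*
 * Usage sketch (hypothetical, names assumed): on device removal, cancel a
 * possibly self-requeueing work item and wait until nothing is running.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->io_work);
 *		// io_work is neither pending nor executing on any CPU here
 *	}
 */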
3183 
3184 /**
3185  * flush_delayed_work - wait for a dwork to finish executing the last queueing
3186  * @dwork: the delayed work to flush
3187  *
3188  * Delayed timer is cancelled and the pending work is queued for
3189  * immediate execution.  Like flush_work(), this function only
3190  * considers the last queueing instance of @dwork.
3191  *
3192  * Return:
3193  * %true if flush_work() waited for the work to finish execution,
3194  * %false if it was already idle.
3195  */
3196 bool flush_delayed_work(struct delayed_work *dwork)
3197 {
3198 	local_irq_disable();
3199 	if (del_timer_sync(&dwork->timer))
3200 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
3201 	local_irq_enable();
3202 	return flush_work(&dwork->work);
3203 }
3204 EXPORT_SYMBOL(flush_delayed_work);
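
/*
 * Usage sketch (hypothetical): force a delayed work that may still be
 * waiting on its timer to run immediately and wait for it, e.g. before
 * reading state the work function produces.  "my_dev" and "stats_dwork"
 * are assumptions for the example.
 *
 *	static void my_dev_sync_stats(struct my_dev *dev)
 *	{
 *		flush_delayed_work(&dev->stats_dwork);
 *		// stats updated by the work function are now visible
 *	}
 */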
3205 
3206 /**
3207  * flush_rcu_work - wait for a rwork to finish executing the last queueing
3208  * @rwork: the rcu work to flush
3209  *
3210  * Return:
3211  * %true if flush_rcu_work() waited for the work to finish execution,
3212  * %false if it was already idle.
3213  */
3214 bool flush_rcu_work(struct rcu_work *rwork)
3215 {
3216 	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3217 		rcu_barrier();
3218 		flush_work(&rwork->work);
3219 		return true;
3220 	} else {
3221 		return flush_work(&rwork->work);
3222 	}
3223 }
3224 EXPORT_SYMBOL(flush_rcu_work);
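
/*
 * Usage sketch (hypothetical): queue an rcu_work and later wait for both
 * the RCU grace period and its execution.  "my_rwork" and "my_workfn" are
 * assumed names.
 *
 *	static struct rcu_work my_rwork;	// INIT_RCU_WORK(&my_rwork, my_workfn)
 *
 *	queue_rcu_work(system_wq, &my_rwork);
 *	...
 *	flush_rcu_work(&my_rwork);		// idle once this returns
 */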
3225 
3226 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3227 {
3228 	unsigned long flags;
3229 	int ret;
3230 
3231 	do {
3232 		ret = try_to_grab_pending(work, is_dwork, &flags);
3233 	} while (unlikely(ret == -EAGAIN));
3234 
3235 	if (unlikely(ret < 0))
3236 		return false;
3237 
3238 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3239 	local_irq_restore(flags);
3240 	return ret;
3241 }
3242 
3243 /**
3244  * cancel_delayed_work - cancel a delayed work
3245  * @dwork: delayed_work to cancel
3246  *
3247  * Kill off a pending delayed_work.
3248  *
3249  * Return: %true if @dwork was pending and canceled; %false if it wasn't
3250  * pending.
3251  *
3252  * Note:
3253  * The work callback function may still be running on return, unless
3254  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3255  * use cancel_delayed_work_sync() to wait on it.
3256  *
3257  * This function is safe to call from any context including IRQ handler.
3258  */
3259 bool cancel_delayed_work(struct delayed_work *dwork)
3260 {
3261 	return __cancel_work(&dwork->work, true);
3262 }
3263 EXPORT_SYMBOL(cancel_delayed_work);
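
/*
 * Usage sketch (hypothetical): safe from IRQ context, e.g. cancelling a
 * timeout handler once the awaited event arrives.  Note the callback may
 * still be running on return; use cancel_delayed_work_sync() when that
 * matters.  "my_dev" and "timeout_dwork" are assumed names.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		cancel_delayed_work(&dev->timeout_dwork);
 *		return IRQ_HANDLED;
 *	}
 */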
3264 
3265 /**
3266  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3267  * @dwork: the delayed work to cancel
3268  *
3269  * This is cancel_work_sync() for delayed works.
3270  *
3271  * Return:
3272  * %true if @dwork was pending, %false otherwise.
3273  */
3274 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3275 {
3276 	return __cancel_work_timer(&dwork->work, true);
3277 }
3278 EXPORT_SYMBOL(cancel_delayed_work_sync);
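
/*
 * Usage sketch (hypothetical): the sleeping counterpart, typically used on
 * teardown paths where the handler must be fully quiesced before freeing
 * what it touches.
 *
 *	cancel_delayed_work_sync(&dev->timeout_dwork);
 */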
3279 
3280 /**
3281  * schedule_on_each_cpu - execute a function synchronously on each online CPU
3282  * @func: the function to call
3283  *
3284  * schedule_on_each_cpu() executes @func on each online CPU using the
3285  * system workqueue and blocks until all CPUs have completed.
3286  * schedule_on_each_cpu() is very slow.
3287  *
3288  * Return:
3289  * 0 on success, -errno on failure.
3290  */
3291 int schedule_on_each_cpu(work_func_t func)
3292 {
3293 	int cpu;
3294 	struct work_struct __percpu *works;
3295 
3296 	works = alloc_percpu(struct work_struct);
3297 	if (!works)
3298 		return -ENOMEM;
3299 
3300 	get_online_cpus();
3301 
3302 	for_each_online_cpu(cpu) {
3303 		struct work_struct *work = per_cpu_ptr(works, cpu);
3304 
3305 		INIT_WORK(work, func);
3306 		schedule_work_on(cpu, work);
3307 	}
3308 
3309 	for_each_online_cpu(cpu)
3310 		flush_work(per_cpu_ptr(works, cpu));
3311 
3312 	put_online_cpus();
3313 	free_percpu(works);
3314 	return 0;
3315 }
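
/*
 * Usage sketch (hypothetical): run a per-CPU cache drain on every online
 * CPU and wait for all of them to finish.  "my_drain_local_cache" is an
 * assumed function, not part of this file.
 *
 *	static void my_drain_local_cache(struct work_struct *unused)
 *	{
 *		// runs once on each online CPU in process context
 *	}
 *
 *	int ret = schedule_on_each_cpu(my_drain_local_cache);
 *
 *	if (ret)
 *		pr_warn("cache drain failed: %d\n", ret);
 */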
3316 
3317 /**
3318  * execute_in_process_context - reliably execute the routine with user context
3319  * @fn:		the function to execute
3320  * @ew:		guaranteed storage for the execute work structure (must
3321  *		be available when the work executes)
3322  *
3323  * Executes the function immediately if process context is available,
3324  * otherwise schedules the function for delayed execution.
3325  *
3326  * Return:	0 - function was executed
3327  *		1 - function was scheduled for execution
3328  */
3329 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3330 {
3331 	if (!in_interrupt()) {
3332 		fn(&ew->work);
3333 		return 0;
3334 	}
3335 
3336 	INIT_WORK(&ew->work, fn);
3337 	schedule_work(&ew->work);
3338 
3339 	return 1;
3340 }
3341 EXPORT_SYMBOL_GPL(execute_in_process_context);
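
/*
 * Usage sketch (hypothetical): a cleanup helper that may be reached from
 * either process or interrupt context.  @ew must outlive any deferred run,
 * so it is embedded in the object being cleaned up.  "my_obj" and
 * "my_obj_cleanup" are assumed names.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	static void my_obj_cleanup(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		...
 *	}
 *
 *	execute_in_process_context(my_obj_cleanup, &obj->ew);
 */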
3342 
3343 /**
3344  * free_workqueue_attrs - free a workqueue_attrs
3345  * @attrs: workqueue_attrs to free
3346  *
3347  * Undo alloc_workqueue_attrs().
3348  */
3349 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3350 {
3351 	if (attrs) {
3352 		free_cpumask_var(attrs->cpumask);
3353 		kfree(attrs);
3354 	}
3355 }
3356 
3357 /**
3358  * alloc_workqueue_attrs - allocate a workqueue_attrs
3359  *
3360  * Allocate a new workqueue_attrs, initialize with default settings and
3361  * return it.
3362  *
3363  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3364  */
3365 struct workqueue_attrs *alloc_workqueue_attrs(void)
3366 {
3367 	struct workqueue_attrs *attrs;
3368 
3369 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3370 	if (!attrs)
3371 		goto fail;
3372 	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3373 		goto fail;
3374 
3375 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
3376 	return attrs;
3377 fail:
3378 	free_workqueue_attrs(attrs);
3379 	return NULL;
3380 }
3381 
3382 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3383 				 const struct workqueue_attrs *from)
3384 {
3385 	to->nice = from->nice;
3386 	cpumask_copy(to->cpumask, from->cpumask);
3387 	/*
3388 	 * Unlike hash and equality test, this function doesn't ignore
3389 	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
3390 	 * get_unbound_pool() explicitly clears ->no_numa after copying.
3391 	 */
3392 	to->no_numa = from->no_numa;
3393 }
3394 
3395 /* hash value of the content of @attr */
3396 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3397 {
3398 	u32 hash = 0;
3399 
3400 	hash = jhash_1word(attrs->nice, hash);
3401 	hash = jhash(cpumask_bits(attrs->cpumask),
3402 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3403 	return hash;
3404 }
3405 
3406 /* content equality test */
3407 static bool wqattrs_equal(const struct workqueue_attrs *a,
3408 			  const struct workqueue_attrs *b)
3409 {
3410 	if (a->nice != b->nice)
3411 		return false;
3412 	if (!cpumask_equal(a->cpumask, b->cpumask))
3413 		return false;
3414 	return true;
3415 }
3416 
3417 /**
3418  * init_worker_pool - initialize a newly zalloc'd worker_pool
3419  * @pool: worker_pool to initialize
3420  *
3421  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3422  *
3423  * Return: 0 on success, -errno on failure.  Even on failure, all fields
3424  * inside @pool proper are initialized and put_unbound_pool() can be called
3425  * on @pool safely to release it.
3426  */
3427 static int init_worker_pool(struct worker_pool *pool)
3428 {
3429 	raw_spin_lock_init(&pool->lock);
3430 	pool->id = -1;
3431 	pool->cpu = -1;
3432 	pool->node = NUMA_NO_NODE;
3433 	pool->flags |= POOL_DISASSOCIATED;
3434 	pool->watchdog_ts = jiffies;
3435 	INIT_LIST_HEAD(&pool->worklist);
3436 	INIT_LIST_HEAD(&pool->idle_list);
3437 	hash_init(pool->busy_hash);
3438 
3439 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3440 
3441 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3442 
3443 	INIT_LIST_HEAD(&pool->workers);
3444 
3445 	ida_init(&pool->worker_ida);
3446 	INIT_HLIST_NODE(&pool->hash_node);
3447 	pool->refcnt = 1;
3448 
3449 	/* shouldn't fail above this point */
3450 	pool->attrs = alloc_workqueue_attrs();
3451 	if (!pool->attrs)
3452 		return -ENOMEM;
3453 	return 0;
3454 }
3455 
3456 #ifdef CONFIG_LOCKDEP
3457 static void wq_init_lockdep(struct workqueue_struct *wq)
3458 {
3459 	char *lock_name;
3460 
3461 	lockdep_register_key(&wq->key);
3462 	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3463 	if (!lock_name)
3464 		lock_name = wq->name;
3465 
3466 	wq->lock_name = lock_name;
3467 	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3468 }
3469 
3470 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3471 {
3472 	lockdep_unregister_key(&wq->key);
3473 }
3474 
3475 static void wq_free_lockdep(struct workqueue_struct *wq)
3476 {
3477 	if (wq->lock_name != wq->name)
3478 		kfree(wq->lock_name);
3479 }
3480 #else
3481 static void wq_init_lockdep(struct workqueue_struct *wq)
3482 {
3483 }
3484 
3485 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3486 {
3487 }
3488 
3489 static void wq_free_lockdep(struct workqueue_struct *wq)
3490 {
3491 }
3492 #endif
3493 
3494 static void rcu_free_wq(struct rcu_head *rcu)
3495 {
3496 	struct workqueue_struct *wq =
3497 		container_of(rcu, struct workqueue_struct, rcu);
3498 
3499 	wq_free_lockdep(wq);
3500 
3501 	if (!(wq->flags & WQ_UNBOUND))
3502 		free_percpu(wq->cpu_pwqs);
3503 	else
3504 		free_workqueue_attrs(wq->unbound_attrs);
3505 
3506 	kfree(wq);
3507 }
3508 
3509 static void rcu_free_pool(struct rcu_head *rcu)
3510 {
3511 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3512 
3513 	ida_destroy(&pool->worker_ida);
3514 	free_workqueue_attrs(pool->attrs);
3515 	kfree(pool);
3516 }
3517 
3518 /* This returns with the lock held on success (pool manager is inactive). */
3519 static bool wq_manager_inactive(struct worker_pool *pool)
3520 {
3521 	raw_spin_lock_irq(&pool->lock);
3522 
3523 	if (pool->flags & POOL_MANAGER_ACTIVE) {
3524 		raw_spin_unlock_irq(&pool->lock);
3525 		return false;
3526 	}
3527 	return true;
3528 }
3529 
3530 /**
3531  * put_unbound_pool - put a worker_pool
3532  * @pool: worker_pool to put
3533  *
3534  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
3535  * safe manner.  get_unbound_pool() calls this function on its failure path
3536  * and this function should be able to release pools which went through,
3537  * successfully or not, init_worker_pool().
3538  *
3539  * Should be called with wq_pool_mutex held.
3540  */
3541 static void put_unbound_pool(struct worker_pool *pool)
3542 {
3543 	DECLARE_COMPLETION_ONSTACK(detach_completion);
3544 	struct worker *worker;
3545 
3546 	lockdep_assert_held(&wq_pool_mutex);
3547 
3548 	if (--pool->refcnt)
3549 		return;
3550 
3551 	/* sanity checks */
3552 	if (WARN_ON(!(pool->cpu < 0)) ||
3553 	    WARN_ON(!list_empty(&pool->worklist)))
3554 		return;
3555 
3556 	/* release id and unhash */
3557 	if (pool->id >= 0)
3558 		idr_remove(&worker_pool_idr, pool->id);
3559 	hash_del(&pool->hash_node);
3560 
3561 	/*
3562 	 * Become the manager and destroy all workers.  This prevents
3563 	 * @pool's workers from blocking on attach_mutex.  We're the last
3564 	 * manager and @pool gets freed with the flag set.
3565 	 * Because of how wq_manager_inactive() works, we will hold the
3566 	 * spinlock after a successful wait.
3567 	 */
3568 	rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3569 			   TASK_UNINTERRUPTIBLE);
3570 	pool->flags |= POOL_MANAGER_ACTIVE;
3571 
3572 	while ((worker = first_idle_worker(pool)))
3573 		destroy_worker(worker);
3574 	WARN_ON(pool->nr_workers || pool->nr_idle);
3575 	raw_spin_unlock_irq(&pool->lock);
3576 
3577 	mutex_lock(&wq_pool_attach_mutex);
3578 	if (!list_empty(&pool->workers))
3579 		pool->detach_completion = &detach_completion;
3580 	mutex_unlock(&wq_pool_attach_mutex);
3581 
3582 	if (pool->detach_completion)
3583 		wait_for_completion(pool->detach_completion);
3584 
3585 	/* shut down the timers */
3586 	del_timer_sync(&pool->idle_timer);
3587 	del_timer_sync(&pool->mayday_timer);
3588 
3589 	/* RCU protected to allow dereferences from get_work_pool() */
3590 	call_rcu(&pool->rcu, rcu_free_pool);
3591 }
3592 
3593 /**
3594  * get_unbound_pool - get a worker_pool with the specified attributes
3595  * @attrs: the attributes of the worker_pool to get
3596  *
3597  * Obtain a worker_pool which has the same attributes as @attrs, bump the
3598  * reference count and return it.  If there already is a matching
3599  * worker_pool, it will be used; otherwise, this function attempts to
3600  * create a new one.
3601  *
3602  * Should be called with wq_pool_mutex held.
3603  *
3604  * Return: On success, a worker_pool with the same attributes as @attrs.
3605  * On failure, %NULL.
3606  */
3607 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3608 {
3609 	u32 hash = wqattrs_hash(attrs);
3610 	struct worker_pool *pool;
3611 	int node;
3612 	int target_node = NUMA_NO_NODE;
3613 
3614 	lockdep_assert_held(&wq_pool_mutex);
3615 
3616 	/* do we already have a matching pool? */
3617 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3618 		if (wqattrs_equal(pool->attrs, attrs)) {
3619 			pool->refcnt++;
3620 			return pool;
3621 		}
3622 	}
3623 
3624 	/* if cpumask is contained inside a NUMA node, we belong to that node */
3625 	if (wq_numa_enabled) {
3626 		for_each_node(node) {
3627 			if (cpumask_subset(attrs->cpumask,
3628 					   wq_numa_possible_cpumask[node])) {
3629 				target_node = node;
3630 				break;
3631 			}
3632 		}
3633 	}
3634 
3635 	/* nope, create a new one */
3636 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3637 	if (!pool || init_worker_pool(pool) < 0)
3638 		goto fail;
3639 
3640 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
3641 	copy_workqueue_attrs(pool->attrs, attrs);
3642 	pool->node = target_node;
3643 
3644 	/*
3645 	 * no_numa isn't a worker_pool attribute, always clear it.  See
3646 	 * 'struct workqueue_attrs' comments for detail.
3647 	 */
3648 	pool->attrs->no_numa = false;
3649 
3650 	if (worker_pool_assign_id(pool) < 0)
3651 		goto fail;
3652 
3653 	/* create and start the initial worker */
3654 	if (wq_online && !create_worker(pool))
3655 		goto fail;
3656 
3657 	/* install */
3658 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
3659 
3660 	return pool;
3661 fail:
3662 	if (pool)
3663 		put_unbound_pool(pool);
3664 	return NULL;
3665 }
3666 
3667 static void rcu_free_pwq(struct rcu_head *rcu)
3668 {
3669 	kmem_cache_free(pwq_cache,
3670 			container_of(rcu, struct pool_workqueue, rcu));
3671 }
3672 
3673 /*
3674  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3675  * and needs to be destroyed.
3676  */
3677 static void pwq_unbound_release_workfn(struct work_struct *work)
3678 {
3679 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3680 						  unbound_release_work);
3681 	struct workqueue_struct *wq = pwq->wq;
3682 	struct worker_pool *pool = pwq->pool;
3683 	bool is_last = false;
3684 
3685 	/*
3686 	 * when @pwq is not linked, it doesn't hold any reference to the
3687 	 * @wq, and @wq is invalid to access.
3688 	 */
3689 	if (!list_empty(&pwq->pwqs_node)) {
3690 		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3691 			return;
3692 
3693 		mutex_lock(&wq->mutex);
3694 		list_del_rcu(&pwq->pwqs_node);
3695 		is_last = list_empty(&wq->pwqs);
3696 		mutex_unlock(&wq->mutex);
3697 	}
3698 
3699 	mutex_lock(&wq_pool_mutex);
3700 	put_unbound_pool(pool);
3701 	mutex_unlock(&wq_pool_mutex);
3702 
3703 	call_rcu(&pwq->rcu, rcu_free_pwq);
3704 
3705 	/*
3706 	 * If we're the last pwq going away, @wq is already dead and no one
3707 	 * is gonna access it anymore.  Schedule RCU free.
3708 	 */
3709 	if (is_last) {
3710 		wq_unregister_lockdep(wq);
3711 		call_rcu(&wq->rcu, rcu_free_wq);
3712 	}
3713 }
3714 
3715 /**
3716  * pwq_adjust_max_active - update a pwq's max_active to the current setting
3717  * @pwq: target pool_workqueue
3718  *
3719  * If @pwq isn't freezing, set @pwq->max_active to the associated
3720  * workqueue's saved_max_active and activate inactive work items
3721  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3722  */
3723 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3724 {
3725 	struct workqueue_struct *wq = pwq->wq;
3726 	bool freezable = wq->flags & WQ_FREEZABLE;
3727 	unsigned long flags;
3728 
3729 	/* for @wq->saved_max_active */
3730 	lockdep_assert_held(&wq->mutex);
3731 
3732 	/* fast exit for non-freezable wqs */
3733 	if (!freezable && pwq->max_active == wq->saved_max_active)
3734 		return;
3735 
3736 	/* this function can be called during early boot w/ irq disabled */
3737 	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3738 
3739 	/*
3740 	 * During [un]freezing, the caller is responsible for ensuring that
3741 	 * this function is called at least once after @workqueue_freezing
3742 	 * is updated and visible.
3743 	 */
3744 	if (!freezable || !workqueue_freezing) {
3745 		bool kick = false;
3746 
3747 		pwq->max_active = wq->saved_max_active;
3748 
3749 		while (!list_empty(&pwq->inactive_works) &&
3750 		       pwq->nr_active < pwq->max_active) {
3751 			pwq_activate_first_inactive(pwq);
3752 			kick = true;
3753 		}
3754 
3755 		/*
3756 		 * Need to kick a worker after thawed or an unbound wq's
3757 		 * max_active is bumped. In realtime scenarios, always kicking a
3758 		 * worker will cause interference on the isolated cpu cores, so
3759 		 * let's kick iff work items were activated.
3760 		 */
3761 		if (kick)
3762 			wake_up_worker(pwq->pool);
3763 	} else {
3764 		pwq->max_active = 0;
3765 	}
3766 
3767 	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3768 }
3769 
3770 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3771 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3772 		     struct worker_pool *pool)
3773 {
3774 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3775 
3776 	memset(pwq, 0, sizeof(*pwq));
3777 
3778 	pwq->pool = pool;
3779 	pwq->wq = wq;
3780 	pwq->flush_color = -1;
3781 	pwq->refcnt = 1;
3782 	INIT_LIST_HEAD(&pwq->inactive_works);
3783 	INIT_LIST_HEAD(&pwq->pwqs_node);
3784 	INIT_LIST_HEAD(&pwq->mayday_node);
3785 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3786 }
3787 
3788 /* sync @pwq with the current state of its associated wq and link it */
3789 static void link_pwq(struct pool_workqueue *pwq)
3790 {
3791 	struct workqueue_struct *wq = pwq->wq;
3792 
3793 	lockdep_assert_held(&wq->mutex);
3794 
3795 	/* may be called multiple times, ignore if already linked */
3796 	if (!list_empty(&pwq->pwqs_node))
3797 		return;
3798 
3799 	/* set the matching work_color */
3800 	pwq->work_color = wq->work_color;
3801 
3802 	/* sync max_active to the current setting */
3803 	pwq_adjust_max_active(pwq);
3804 
3805 	/* link in @pwq */
3806 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3807 }
3808 
3809 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3810 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3811 					const struct workqueue_attrs *attrs)
3812 {
3813 	struct worker_pool *pool;
3814 	struct pool_workqueue *pwq;
3815 
3816 	lockdep_assert_held(&wq_pool_mutex);
3817 
3818 	pool = get_unbound_pool(attrs);
3819 	if (!pool)
3820 		return NULL;
3821 
3822 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3823 	if (!pwq) {
3824 		put_unbound_pool(pool);
3825 		return NULL;
3826 	}
3827 
3828 	init_pwq(pwq, wq, pool);
3829 	return pwq;
3830 }
3831 
3832 /**
3833  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3834  * @attrs: the wq_attrs of the default pwq of the target workqueue
3835  * @node: the target NUMA node
3836  * @cpu_going_down: if >= 0, the CPU to consider as offline
3837  * @cpumask: outarg, the resulting cpumask
3838  *
3839  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3840  * @cpu_going_down is >= 0, that cpu is considered offline during
3841  * calculation.  The result is stored in @cpumask.
3842  *
3843  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3844  * enabled and @node has online CPUs requested by @attrs, the returned
3845  * cpumask is the intersection of the possible CPUs of @node and
3846  * @attrs->cpumask.
3847  *
3848  * The caller is responsible for ensuring that the cpumask of @node stays
3849  * stable.
3850  *
3851  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3852  * %false if equal.
3853  */
3854 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3855 				 int cpu_going_down, cpumask_t *cpumask)
3856 {
3857 	if (!wq_numa_enabled || attrs->no_numa)
3858 		goto use_dfl;
3859 
3860 	/* does @node have any online CPUs @attrs wants? */
3861 	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3862 	if (cpu_going_down >= 0)
3863 		cpumask_clear_cpu(cpu_going_down, cpumask);
3864 
3865 	if (cpumask_empty(cpumask))
3866 		goto use_dfl;
3867 
3868 	/* yeap, return possible CPUs in @node that @attrs wants */
3869 	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3870 
3871 	if (cpumask_empty(cpumask)) {
3872 		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3873 				"possible intersect\n");
3874 		return false;
3875 	}
3876 
3877 	return !cpumask_equal(cpumask, attrs->cpumask);
3878 
3879 use_dfl:
3880 	cpumask_copy(cpumask, attrs->cpumask);
3881 	return false;
3882 }
3883 
3884 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3885 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3886 						   int node,
3887 						   struct pool_workqueue *pwq)
3888 {
3889 	struct pool_workqueue *old_pwq;
3890 
3891 	lockdep_assert_held(&wq_pool_mutex);
3892 	lockdep_assert_held(&wq->mutex);
3893 
3894 	/* link_pwq() can handle duplicate calls */
3895 	link_pwq(pwq);
3896 
3897 	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3898 	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3899 	return old_pwq;
3900 }
3901 
3902 /* context to store the prepared attrs & pwqs before applying */
3903 struct apply_wqattrs_ctx {
3904 	struct workqueue_struct	*wq;		/* target workqueue */
3905 	struct workqueue_attrs	*attrs;		/* attrs to apply */
3906 	struct list_head	list;		/* queued for batching commit */
3907 	struct pool_workqueue	*dfl_pwq;
3908 	struct pool_workqueue	*pwq_tbl[];
3909 };
3910 
3911 /* free the resources after success or abort */
3912 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3913 {
3914 	if (ctx) {
3915 		int node;
3916 
3917 		for_each_node(node)
3918 			put_pwq_unlocked(ctx->pwq_tbl[node]);
3919 		put_pwq_unlocked(ctx->dfl_pwq);
3920 
3921 		free_workqueue_attrs(ctx->attrs);
3922 
3923 		kfree(ctx);
3924 	}
3925 }
3926 
3927 /* allocate the attrs and pwqs for later installation */
3928 static struct apply_wqattrs_ctx *
3929 apply_wqattrs_prepare(struct workqueue_struct *wq,
3930 		      const struct workqueue_attrs *attrs)
3931 {
3932 	struct apply_wqattrs_ctx *ctx;
3933 	struct workqueue_attrs *new_attrs, *tmp_attrs;
3934 	int node;
3935 
3936 	lockdep_assert_held(&wq_pool_mutex);
3937 
3938 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3939 
3940 	new_attrs = alloc_workqueue_attrs();
3941 	tmp_attrs = alloc_workqueue_attrs();
3942 	if (!ctx || !new_attrs || !tmp_attrs)
3943 		goto out_free;
3944 
3945 	/*
3946 	 * Calculate the attrs of the default pwq.
3947 	 * If the user configured cpumask doesn't overlap with the
3948 	 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3949 	 */
3950 	copy_workqueue_attrs(new_attrs, attrs);
3951 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3952 	if (unlikely(cpumask_empty(new_attrs->cpumask)))
3953 		cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3954 
3955 	/*
3956 	 * We may create multiple pwqs with differing cpumasks.  Make a
3957 	 * copy of @new_attrs which will be modified and used to obtain
3958 	 * pools.
3959 	 */
3960 	copy_workqueue_attrs(tmp_attrs, new_attrs);
3961 
3962 	/*
3963 	 * If something goes wrong during CPU up/down, we'll fall back to
3964 	 * the default pwq covering whole @attrs->cpumask.  Always create
3965 	 * it even if we don't use it immediately.
3966 	 */
3967 	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3968 	if (!ctx->dfl_pwq)
3969 		goto out_free;
3970 
3971 	for_each_node(node) {
3972 		if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3973 			ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3974 			if (!ctx->pwq_tbl[node])
3975 				goto out_free;
3976 		} else {
3977 			ctx->dfl_pwq->refcnt++;
3978 			ctx->pwq_tbl[node] = ctx->dfl_pwq;
3979 		}
3980 	}
3981 
3982 	/* save the user configured attrs and sanitize it. */
3983 	copy_workqueue_attrs(new_attrs, attrs);
3984 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3985 	ctx->attrs = new_attrs;
3986 
3987 	ctx->wq = wq;
3988 	free_workqueue_attrs(tmp_attrs);
3989 	return ctx;
3990 
3991 out_free:
3992 	free_workqueue_attrs(tmp_attrs);
3993 	free_workqueue_attrs(new_attrs);
3994 	apply_wqattrs_cleanup(ctx);
3995 	return NULL;
3996 }
3997 
3998 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3999 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4000 {
4001 	int node;
4002 
4003 	/* all pwqs have been created successfully, let's install'em */
4004 	mutex_lock(&ctx->wq->mutex);
4005 
4006 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4007 
4008 	/* save the previous pwq and install the new one */
4009 	for_each_node(node)
4010 		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4011 							  ctx->pwq_tbl[node]);
4012 
4013 	/* @dfl_pwq might not have been used, ensure it's linked */
4014 	link_pwq(ctx->dfl_pwq);
4015 	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4016 
4017 	mutex_unlock(&ctx->wq->mutex);
4018 }
4019 
4020 static void apply_wqattrs_lock(void)
4021 {
4022 	/* CPUs should stay stable across pwq creations and installations */
4023 	get_online_cpus();
4024 	mutex_lock(&wq_pool_mutex);
4025 }
4026 
4027 static void apply_wqattrs_unlock(void)
4028 {
4029 	mutex_unlock(&wq_pool_mutex);
4030 	put_online_cpus();
4031 }
4032 
4033 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4034 					const struct workqueue_attrs *attrs)
4035 {
4036 	struct apply_wqattrs_ctx *ctx;
4037 
4038 	/* only unbound workqueues can change attributes */
4039 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4040 		return -EINVAL;
4041 
4042 	/* creating multiple pwqs breaks ordering guarantee */
4043 	if (!list_empty(&wq->pwqs)) {
4044 		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4045 			return -EINVAL;
4046 
4047 		wq->flags &= ~__WQ_ORDERED;
4048 	}
4049 
4050 	ctx = apply_wqattrs_prepare(wq, attrs);
4051 	if (!ctx)
4052 		return -ENOMEM;
4053 
4054 	/* the ctx has been prepared successfully, let's commit it */
4055 	apply_wqattrs_commit(ctx);
4056 	apply_wqattrs_cleanup(ctx);
4057 
4058 	return 0;
4059 }
4060 
4061 /**
4062  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4063  * @wq: the target workqueue
4064  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4065  *
4066  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
4067  * machines, this function maps a separate pwq to each NUMA node with
4068  * possible CPUs in @attrs->cpumask so that work items are affine to the
4069  * NUMA node it was issued on.  Older pwqs are released as in-flight work
4070  * items finish.  Note that a work item which repeatedly requeues itself
4071  * back-to-back will stay on its current pwq.
4072  *
4073  * Performs GFP_KERNEL allocations.
4074  *
4075  * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4076  *
4077  * Return: 0 on success and -errno on failure.
4078  */
4079 int apply_workqueue_attrs(struct workqueue_struct *wq,
4080 			  const struct workqueue_attrs *attrs)
4081 {
4082 	int ret;
4083 
4084 	lockdep_assert_cpus_held();
4085 
4086 	mutex_lock(&wq_pool_mutex);
4087 	ret = apply_workqueue_attrs_locked(wq, attrs);
4088 	mutex_unlock(&wq_pool_mutex);
4089 
4090 	return ret;
4091 }
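
/*
 * Usage sketch (hypothetical): restrict an unbound workqueue to a caller
 * supplied cpumask.  The attrs object is only a template and can be freed
 * after the call.  "my_unbound_wq" and "housekeeping_mask" are assumed
 * names; error handling is trimmed for brevity.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = 0;
 *		cpumask_copy(attrs->cpumask, housekeeping_mask);
 *		get_online_cpus();
 *		apply_workqueue_attrs(my_unbound_wq, attrs);
 *		put_online_cpus();
 *		free_workqueue_attrs(attrs);
 *	}
 */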
4092 
4093 /**
4094  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4095  * @wq: the target workqueue
4096  * @cpu: the CPU coming up or going down
4097  * @online: whether @cpu is coming up or going down
4098  *
4099  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4100  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
4101  * @wq accordingly.
4102  *
4103  * If NUMA affinity can't be adjusted due to memory allocation failure, it
4104  * falls back to @wq->dfl_pwq which may not be optimal but is always
4105  * correct.
4106  *
4107  * Note that when the last allowed CPU of a NUMA node goes offline for a
4108  * workqueue with a cpumask spanning multiple nodes, the workers which were
4109  * already executing the work items for the workqueue will lose their CPU
4110  * affinity and may execute on any CPU.  This is similar to how per-cpu
4111  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
4112  * affinity, it's the user's responsibility to flush the work item from
4113  * CPU_DOWN_PREPARE.
4114  */
4115 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4116 				   bool online)
4117 {
4118 	int node = cpu_to_node(cpu);
4119 	int cpu_off = online ? -1 : cpu;
4120 	struct pool_workqueue *old_pwq = NULL, *pwq;
4121 	struct workqueue_attrs *target_attrs;
4122 	cpumask_t *cpumask;
4123 
4124 	lockdep_assert_held(&wq_pool_mutex);
4125 
4126 	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4127 	    wq->unbound_attrs->no_numa)
4128 		return;
4129 
4130 	/*
4131 	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4132 	 * Let's use a preallocated one.  The following buf is protected by
4133 	 * CPU hotplug exclusion.
4134 	 */
4135 	target_attrs = wq_update_unbound_numa_attrs_buf;
4136 	cpumask = target_attrs->cpumask;
4137 
4138 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4139 	pwq = unbound_pwq_by_node(wq, node);
4140 
4141 	/*
4142 	 * Let's determine what needs to be done.  If the target cpumask is
4143 	 * different from the default pwq's, we need to compare it to @pwq's
4144 	 * and create a new one if they don't match.  If the target cpumask
4145 	 * equals the default pwq's, the default pwq should be used.
4146 	 */
4147 	if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4148 		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4149 			return;
4150 	} else {
4151 		goto use_dfl_pwq;
4152 	}
4153 
4154 	/* create a new pwq */
4155 	pwq = alloc_unbound_pwq(wq, target_attrs);
4156 	if (!pwq) {
4157 		pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4158 			wq->name);
4159 		goto use_dfl_pwq;
4160 	}
4161 
4162 	/* Install the new pwq. */
4163 	mutex_lock(&wq->mutex);
4164 	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4165 	goto out_unlock;
4166 
4167 use_dfl_pwq:
4168 	mutex_lock(&wq->mutex);
4169 	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4170 	get_pwq(wq->dfl_pwq);
4171 	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4172 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4173 out_unlock:
4174 	mutex_unlock(&wq->mutex);
4175 	put_pwq_unlocked(old_pwq);
4176 }
4177 
4178 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4179 {
4180 	bool highpri = wq->flags & WQ_HIGHPRI;
4181 	int cpu, ret;
4182 
4183 	if (!(wq->flags & WQ_UNBOUND)) {
4184 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4185 		if (!wq->cpu_pwqs)
4186 			return -ENOMEM;
4187 
4188 		for_each_possible_cpu(cpu) {
4189 			struct pool_workqueue *pwq =
4190 				per_cpu_ptr(wq->cpu_pwqs, cpu);
4191 			struct worker_pool *cpu_pools =
4192 				per_cpu(cpu_worker_pools, cpu);
4193 
4194 			init_pwq(pwq, wq, &cpu_pools[highpri]);
4195 
4196 			mutex_lock(&wq->mutex);
4197 			link_pwq(pwq);
4198 			mutex_unlock(&wq->mutex);
4199 		}
4200 		return 0;
4201 	}
4202 
4203 	get_online_cpus();
4204 	if (wq->flags & __WQ_ORDERED) {
4205 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4206 		/* there should only be a single pwq for ordering guarantee */
4207 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4208 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4209 		     "ordering guarantee broken for workqueue %s\n", wq->name);
4210 	} else {
4211 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4212 	}
4213 	put_online_cpus();
4214 
4215 	return ret;
4216 }
4217 
4218 static int wq_clamp_max_active(int max_active, unsigned int flags,
4219 			       const char *name)
4220 {
4221 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4222 
4223 	if (max_active < 1 || max_active > lim)
4224 		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4225 			max_active, name, 1, lim);
4226 
4227 	return clamp_val(max_active, 1, lim);
4228 }
4229 
4230 /*
4231  * Workqueues which may be used during memory reclaim should have a rescuer
4232  * to guarantee forward progress.
4233  */
4234 static int init_rescuer(struct workqueue_struct *wq)
4235 {
4236 	struct worker *rescuer;
4237 	int ret;
4238 
4239 	if (!(wq->flags & WQ_MEM_RECLAIM))
4240 		return 0;
4241 
4242 	rescuer = alloc_worker(NUMA_NO_NODE);
4243 	if (!rescuer)
4244 		return -ENOMEM;
4245 
4246 	rescuer->rescue_wq = wq;
4247 	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4248 	if (IS_ERR(rescuer->task)) {
4249 		ret = PTR_ERR(rescuer->task);
4250 		kfree(rescuer);
4251 		return ret;
4252 	}
4253 
4254 	wq->rescuer = rescuer;
4255 	kthread_bind_mask(rescuer->task, cpu_possible_mask);
4256 	wake_up_process(rescuer->task);
4257 
4258 	return 0;
4259 }
4260 
4261 __printf(1, 4)
4262 struct workqueue_struct *alloc_workqueue(const char *fmt,
4263 					 unsigned int flags,
4264 					 int max_active, ...)
4265 {
4266 	size_t tbl_size = 0;
4267 	va_list args;
4268 	struct workqueue_struct *wq;
4269 	struct pool_workqueue *pwq;
4270 
4271 	/*
4272 	 * Unbound && max_active == 1 used to imply ordered, which is no
4273 	 * longer the case on NUMA machines due to per-node pools.  While
4274 	 * alloc_ordered_workqueue() is the right way to create an ordered
4275 	 * workqueue, keep the previous behavior to avoid subtle breakages
4276 	 * on NUMA.
4277 	 */
4278 	if ((flags & WQ_UNBOUND) && max_active == 1)
4279 		flags |= __WQ_ORDERED;
4280 
4281 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
4282 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4283 		flags |= WQ_UNBOUND;
4284 
4285 	/* allocate wq and format name */
4286 	if (flags & WQ_UNBOUND)
4287 		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4288 
4289 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4290 	if (!wq)
4291 		return NULL;
4292 
4293 	if (flags & WQ_UNBOUND) {
4294 		wq->unbound_attrs = alloc_workqueue_attrs();
4295 		if (!wq->unbound_attrs)
4296 			goto err_free_wq;
4297 	}
4298 
4299 	va_start(args, max_active);
4300 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4301 	va_end(args);
4302 
4303 	max_active = max_active ?: WQ_DFL_ACTIVE;
4304 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
4305 
4306 	/* init wq */
4307 	wq->flags = flags;
4308 	wq->saved_max_active = max_active;
4309 	mutex_init(&wq->mutex);
4310 	atomic_set(&wq->nr_pwqs_to_flush, 0);
4311 	INIT_LIST_HEAD(&wq->pwqs);
4312 	INIT_LIST_HEAD(&wq->flusher_queue);
4313 	INIT_LIST_HEAD(&wq->flusher_overflow);
4314 	INIT_LIST_HEAD(&wq->maydays);
4315 
4316 	wq_init_lockdep(wq);
4317 	INIT_LIST_HEAD(&wq->list);
4318 
4319 	if (alloc_and_link_pwqs(wq) < 0)
4320 		goto err_unreg_lockdep;
4321 
4322 	if (wq_online && init_rescuer(wq) < 0)
4323 		goto err_destroy;
4324 
4325 	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4326 		goto err_destroy;
4327 
4328 	/*
4329 	 * wq_pool_mutex protects global freeze state and workqueues list.
4330 	 * Grab it, adjust max_active and add the new @wq to workqueues
4331 	 * list.
4332 	 */
4333 	mutex_lock(&wq_pool_mutex);
4334 
4335 	mutex_lock(&wq->mutex);
4336 	for_each_pwq(pwq, wq)
4337 		pwq_adjust_max_active(pwq);
4338 	mutex_unlock(&wq->mutex);
4339 
4340 	list_add_tail_rcu(&wq->list, &workqueues);
4341 
4342 	mutex_unlock(&wq_pool_mutex);
4343 
4344 	return wq;
4345 
4346 err_unreg_lockdep:
4347 	wq_unregister_lockdep(wq);
4348 	wq_free_lockdep(wq);
4349 err_free_wq:
4350 	free_workqueue_attrs(wq->unbound_attrs);
4351 	kfree(wq);
4352 	return NULL;
4353 err_destroy:
4354 	destroy_workqueue(wq);
4355 	return NULL;
4356 }
4357 EXPORT_SYMBOL_GPL(alloc_workqueue);
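
/*
 * Usage sketch (hypothetical): a driver creating its own workqueue that
 * may be needed during memory reclaim, then tearing it down on exit.
 * "my_wq" and "dev->io_work" are assumed names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &dev->io_work);
 *	...
 *	destroy_workqueue(my_wq);
 */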
4358 
4359 static bool pwq_busy(struct pool_workqueue *pwq)
4360 {
4361 	int i;
4362 
4363 	for (i = 0; i < WORK_NR_COLORS; i++)
4364 		if (pwq->nr_in_flight[i])
4365 			return true;
4366 
4367 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4368 		return true;
4369 	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4370 		return true;
4371 
4372 	return false;
4373 }
4374 
4375 /**
4376  * destroy_workqueue - safely terminate a workqueue
4377  * @wq: target workqueue
4378  *
4379  * Safely destroy a workqueue. All work currently pending will be done first.
4380  */
4381 void destroy_workqueue(struct workqueue_struct *wq)
4382 {
4383 	struct pool_workqueue *pwq;
4384 	int node;
4385 
4386 	/*
4387 	 * Remove it from sysfs first so that sanity check failure doesn't
4388 	 * lead to sysfs name conflicts.
4389 	 */
4390 	workqueue_sysfs_unregister(wq);
4391 
4392 	/* drain it before proceeding with destruction */
4393 	drain_workqueue(wq);
4394 
4395 	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4396 	if (wq->rescuer) {
4397 		struct worker *rescuer = wq->rescuer;
4398 
4399 		/* this prevents new queueing */
4400 		raw_spin_lock_irq(&wq_mayday_lock);
4401 		wq->rescuer = NULL;
4402 		raw_spin_unlock_irq(&wq_mayday_lock);
4403 
4404 		/* rescuer will empty maydays list before exiting */
4405 		kthread_stop(rescuer->task);
4406 		kfree(rescuer);
4407 	}
4408 
4409 	/*
4410 	 * Sanity checks - grab all the locks so that we wait for all
4411 	 * in-flight operations which may do put_pwq().
4412 	 */
4413 	mutex_lock(&wq_pool_mutex);
4414 	mutex_lock(&wq->mutex);
4415 	for_each_pwq(pwq, wq) {
4416 		raw_spin_lock_irq(&pwq->pool->lock);
4417 		if (WARN_ON(pwq_busy(pwq))) {
4418 			pr_warn("%s: %s has the following busy pwq\n",
4419 				__func__, wq->name);
4420 			show_pwq(pwq);
4421 			raw_spin_unlock_irq(&pwq->pool->lock);
4422 			mutex_unlock(&wq->mutex);
4423 			mutex_unlock(&wq_pool_mutex);
4424 			show_workqueue_state();
4425 			return;
4426 		}
4427 		raw_spin_unlock_irq(&pwq->pool->lock);
4428 	}
4429 	mutex_unlock(&wq->mutex);
4430 
4431 	/*
4432 	 * wq list is used to freeze wq, remove from list after
4433 	 * flushing is complete in case freeze races us.
4434 	 */
4435 	list_del_rcu(&wq->list);
4436 	mutex_unlock(&wq_pool_mutex);
4437 
4438 	if (!(wq->flags & WQ_UNBOUND)) {
4439 		wq_unregister_lockdep(wq);
4440 		/*
4441 		 * The base ref is never dropped on per-cpu pwqs.  Directly
4442 		 * schedule RCU free.
4443 		 */
4444 		call_rcu(&wq->rcu, rcu_free_wq);
4445 	} else {
4446 		/*
4447 		 * We're the sole accessor of @wq at this point.  Directly
4448 		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4449 		 * @wq will be freed when the last pwq is released.
4450 		 */
4451 		for_each_node(node) {
4452 			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4453 			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4454 			put_pwq_unlocked(pwq);
4455 		}
4456 
4457 		/*
4458 		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4459 		 * put.  Don't access it afterwards.
4460 		 */
4461 		pwq = wq->dfl_pwq;
4462 		wq->dfl_pwq = NULL;
4463 		put_pwq_unlocked(pwq);
4464 	}
4465 }
4466 EXPORT_SYMBOL_GPL(destroy_workqueue);
4467 
4468 /**
4469  * workqueue_set_max_active - adjust max_active of a workqueue
4470  * @wq: target workqueue
4471  * @max_active: new max_active value.
4472  *
4473  * Set max_active of @wq to @max_active.
4474  *
4475  * CONTEXT:
4476  * Don't call from IRQ context.
4477  */
4478 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4479 {
4480 	struct pool_workqueue *pwq;
4481 
4482 	/* disallow meddling with max_active for ordered workqueues */
4483 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4484 		return;
4485 
4486 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4487 
4488 	mutex_lock(&wq->mutex);
4489 
4490 	wq->flags &= ~__WQ_ORDERED;
4491 	wq->saved_max_active = max_active;
4492 
4493 	for_each_pwq(pwq, wq)
4494 		pwq_adjust_max_active(pwq);
4495 
4496 	mutex_unlock(&wq->mutex);
4497 }
4498 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
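
/*
 * Usage sketch (hypothetical): widen concurrency at runtime, e.g. after
 * probing discovers many independent devices sharing one workqueue.
 * "my_wq" is an assumed workqueue created with alloc_workqueue().
 *
 *	workqueue_set_max_active(my_wq, 16);
 */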
4499 
4500 /**
4501  * current_work - retrieve %current task's work struct
4502  *
4503  * Determine if %current task is a workqueue worker and what it's working on.
4504  * Useful to find out the context that the %current task is running in.
4505  *
4506  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4507  */
4508 struct work_struct *current_work(void)
4509 {
4510 	struct worker *worker = current_wq_worker();
4511 
4512 	return worker ? worker->current_work : NULL;
4513 }
4514 EXPORT_SYMBOL(current_work);
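
/*
 * Usage sketch (hypothetical): avoid a self-deadlock by skipping a flush
 * when the caller is already running from the work item in question.
 * "dev->io_work" is an assumed work item.
 *
 *	if (current_work() != &dev->io_work)
 *		flush_work(&dev->io_work);
 */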
4515 
4516 /**
4517  * current_is_workqueue_rescuer - is %current workqueue rescuer?
4518  *
4519  * Determine whether %current is a workqueue rescuer.  Can be used from
4520  * work functions to determine whether it's being run off the rescuer task.
4521  *
4522  * Return: %true if %current is a workqueue rescuer. %false otherwise.
4523  */
4524 bool current_is_workqueue_rescuer(void)
4525 {
4526 	struct worker *worker = current_wq_worker();
4527 
4528 	return worker && worker->rescue_wq;
4529 }
4530 
4531 /**
4532  * workqueue_congested - test whether a workqueue is congested
4533  * @cpu: CPU in question
4534  * @wq: target workqueue
4535  *
4536  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4537  * no synchronization around this function and the test result is
4538  * unreliable and only useful as advisory hints or for debugging.
4539  *
4540  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4541  * Note that both per-cpu and unbound workqueues may be associated with
4542  * multiple pool_workqueues which have separate congested states.  A
4543  * workqueue being congested on one CPU doesn't mean the workqueue is also
4544  * congested on other CPUs / NUMA nodes.
4545  *
4546  * Return:
4547  * %true if congested, %false otherwise.
4548  */
4549 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4550 {
4551 	struct pool_workqueue *pwq;
4552 	bool ret;
4553 
4554 	rcu_read_lock();
4555 	preempt_disable();
4556 
4557 	if (cpu == WORK_CPU_UNBOUND)
4558 		cpu = smp_processor_id();
4559 
4560 	if (!(wq->flags & WQ_UNBOUND))
4561 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4562 	else
4563 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4564 
4565 	ret = !list_empty(&pwq->inactive_works);
4566 	preempt_enable();
4567 	rcu_read_unlock();
4568 
4569 	return ret;
4570 }
4571 EXPORT_SYMBOL_GPL(workqueue_congested);
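
/*
 * Usage sketch (hypothetical, advisory only): use congestion as a hint to
 * shed optional background work on the local CPU.  "my_wq" and
 * "dev->bg_work" are assumed names; the result may already be stale.
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &dev->bg_work);
 */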
4572 
4573 /**
4574  * work_busy - test whether a work is currently pending or running
4575  * @work: the work to be tested
4576  *
4577  * Test whether @work is currently pending or running.  There is no
4578  * synchronization around this function and the test result is
4579  * unreliable and only useful as advisory hints or for debugging.
4580  *
4581  * Return:
4582  * OR'd bitmask of WORK_BUSY_* bits.
4583  */
4584 unsigned int work_busy(struct work_struct *work)
4585 {
4586 	struct worker_pool *pool;
4587 	unsigned long flags;
4588 	unsigned int ret = 0;
4589 
4590 	if (work_pending(work))
4591 		ret |= WORK_BUSY_PENDING;
4592 
4593 	rcu_read_lock();
4594 	pool = get_work_pool(work);
4595 	if (pool) {
4596 		raw_spin_lock_irqsave(&pool->lock, flags);
4597 		if (find_worker_executing_work(pool, work))
4598 			ret |= WORK_BUSY_RUNNING;
4599 		raw_spin_unlock_irqrestore(&pool->lock, flags);
4600 	}
4601 	rcu_read_unlock();
4602 
4603 	return ret;
4604 }
4605 EXPORT_SYMBOL_GPL(work_busy);
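
/*
 * Usage sketch (hypothetical, advisory only): report a work item's state
 * in a debug dump.  The result may be stale by the time it is printed.
 *
 *	unsigned int busy = work_busy(&dev->io_work);
 *
 *	pr_info("io_work:%s%s\n",
 *		busy & WORK_BUSY_PENDING ? " pending" : "",
 *		busy & WORK_BUSY_RUNNING ? " running" : "");
 */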
4606 
4607 /**
4608  * set_worker_desc - set description for the current work item
4609  * @fmt: printf-style format string
4610  * @...: arguments for the format string
4611  *
4612  * This function can be called by a running work function to describe what
4613  * the work item is about.  If the worker task gets dumped, this
4614  * information will be printed out together to help debugging.  The
4615  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4616  */
4617 void set_worker_desc(const char *fmt, ...)
4618 {
4619 	struct worker *worker = current_wq_worker();
4620 	va_list args;
4621 
4622 	if (worker) {
4623 		va_start(args, fmt);
4624 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4625 		va_end(args);
4626 	}
4627 }
4628 EXPORT_SYMBOL_GPL(set_worker_desc);
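
/*
 * Usage sketch (hypothetical): called from inside a work function so that
 * a task dump identifies which object was being processed.  "my_dev",
 * "my_io_workfn" and the dev->dev device pointer are assumed names.
 *
 *	static void my_io_workfn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, io_work);
 *
 *		set_worker_desc("my_drv %s", dev_name(dev->dev));
 *		...
 *	}
 */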
4629 
4630 /**
4631  * print_worker_info - print out worker information and description
4632  * @log_lvl: the log level to use when printing
4633  * @task: target task
4634  *
4635  * If @task is a worker and currently executing a work item, print out the
4636  * name of the workqueue being serviced and worker description set with
4637  * set_worker_desc() by the currently executing work item.
4638  *
4639  * This function can be safely called on any task as long as the
4640  * task_struct itself is accessible.  While safe, this function isn't
4641  * synchronized and may print out mixed-up or garbled output of limited length.
4642  */
4643 void print_worker_info(const char *log_lvl, struct task_struct *task)
4644 {
4645 	work_func_t *fn = NULL;
4646 	char name[WQ_NAME_LEN] = { };
4647 	char desc[WORKER_DESC_LEN] = { };
4648 	struct pool_workqueue *pwq = NULL;
4649 	struct workqueue_struct *wq = NULL;
4650 	struct worker *worker;
4651 
4652 	if (!(task->flags & PF_WQ_WORKER))
4653 		return;
4654 
4655 	/*
4656 	 * This function is called without any synchronization and @task
4657 	 * could be in any state.  Be careful with dereferences.
4658 	 */
4659 	worker = kthread_probe_data(task);
4660 
4661 	/*
4662 	 * Carefully copy the associated workqueue's workfn, name and desc.
4663 	 * Keep the original last '\0' in case the original is garbage.
4664 	 */
4665 	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4666 	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4667 	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4668 	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4669 	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4670 
4671 	if (fn || name[0] || desc[0]) {
4672 		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4673 		if (strcmp(name, desc))
4674 			pr_cont(" (%s)", desc);
4675 		pr_cont("\n");
4676 	}
4677 }
4678 
4679 static void pr_cont_pool_info(struct worker_pool *pool)
4680 {
4681 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4682 	if (pool->node != NUMA_NO_NODE)
4683 		pr_cont(" node=%d", pool->node);
4684 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4685 }
4686 
4687 static void pr_cont_work(bool comma, struct work_struct *work)
4688 {
4689 	if (work->func == wq_barrier_func) {
4690 		struct wq_barrier *barr;
4691 
4692 		barr = container_of(work, struct wq_barrier, work);
4693 
4694 		pr_cont("%s BAR(%d)", comma ? "," : "",
4695 			task_pid_nr(barr->task));
4696 	} else {
4697 		pr_cont("%s %ps", comma ? "," : "", work->func);
4698 	}
4699 }
4700 
4701 static void show_pwq(struct pool_workqueue *pwq)
4702 {
4703 	struct worker_pool *pool = pwq->pool;
4704 	struct work_struct *work;
4705 	struct worker *worker;
4706 	bool has_in_flight = false, has_pending = false;
4707 	int bkt;
4708 
4709 	pr_info("  pwq %d:", pool->id);
4710 	pr_cont_pool_info(pool);
4711 
4712 	pr_cont(" active=%d/%d refcnt=%d%s\n",
4713 		pwq->nr_active, pwq->max_active, pwq->refcnt,
4714 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4715 
4716 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4717 		if (worker->current_pwq == pwq) {
4718 			has_in_flight = true;
4719 			break;
4720 		}
4721 	}
4722 	if (has_in_flight) {
4723 		bool comma = false;
4724 
4725 		pr_info("    in-flight:");
4726 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4727 			if (worker->current_pwq != pwq)
4728 				continue;
4729 
4730 			pr_cont("%s %d%s:%ps", comma ? "," : "",
4731 				task_pid_nr(worker->task),
4732 				worker->rescue_wq ? "(RESCUER)" : "",
4733 				worker->current_func);
4734 			list_for_each_entry(work, &worker->scheduled, entry)
4735 				pr_cont_work(false, work);
4736 			comma = true;
4737 		}
4738 		pr_cont("\n");
4739 	}
4740 
4741 	list_for_each_entry(work, &pool->worklist, entry) {
4742 		if (get_work_pwq(work) == pwq) {
4743 			has_pending = true;
4744 			break;
4745 		}
4746 	}
4747 	if (has_pending) {
4748 		bool comma = false;
4749 
4750 		pr_info("    pending:");
4751 		list_for_each_entry(work, &pool->worklist, entry) {
4752 			if (get_work_pwq(work) != pwq)
4753 				continue;
4754 
4755 			pr_cont_work(comma, work);
4756 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4757 		}
4758 		pr_cont("\n");
4759 	}
4760 
4761 	if (!list_empty(&pwq->inactive_works)) {
4762 		bool comma = false;
4763 
4764 		pr_info("    inactive:");
4765 		list_for_each_entry(work, &pwq->inactive_works, entry) {
4766 			pr_cont_work(comma, work);
4767 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4768 		}
4769 		pr_cont("\n");
4770 	}
4771 }
4772 
4773 /**
4774  * show_workqueue_state - dump workqueue state
4775  *
4776  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4777  * all busy workqueues and pools.
4778  */
4779 void show_workqueue_state(void)
4780 {
4781 	struct workqueue_struct *wq;
4782 	struct worker_pool *pool;
4783 	unsigned long flags;
4784 	int pi;
4785 
4786 	rcu_read_lock();
4787 
4788 	pr_info("Showing busy workqueues and worker pools:\n");
4789 
4790 	list_for_each_entry_rcu(wq, &workqueues, list) {
4791 		struct pool_workqueue *pwq;
4792 		bool idle = true;
4793 
4794 		for_each_pwq(pwq, wq) {
4795 			if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4796 				idle = false;
4797 				break;
4798 			}
4799 		}
4800 		if (idle)
4801 			continue;
4802 
4803 		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4804 
4805 		for_each_pwq(pwq, wq) {
4806 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4807 			if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4808 				show_pwq(pwq);
4809 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4810 			/*
4811 			 * We could be printing a lot from atomic context, e.g.
4812 			 * sysrq-t -> show_workqueue_state(). Avoid triggering
4813 			 * hard lockup.
4814 			 */
4815 			touch_nmi_watchdog();
4816 		}
4817 	}
4818 
4819 	for_each_pool(pool, pi) {
4820 		struct worker *worker;
4821 		bool first = true;
4822 		unsigned long hung = 0;
4823 
4824 		raw_spin_lock_irqsave(&pool->lock, flags);
4825 		if (pool->nr_workers == pool->nr_idle)
4826 			goto next_pool;
4827 
4828 		/* How long the first pending work is waiting for a worker. */
4829 		if (!list_empty(&pool->worklist))
4830 			hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
4831 
4832 		pr_info("pool %d:", pool->id);
4833 		pr_cont_pool_info(pool);
4834 		pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
4835 		if (pool->manager)
4836 			pr_cont(" manager: %d",
4837 				task_pid_nr(pool->manager->task));
4838 		list_for_each_entry(worker, &pool->idle_list, entry) {
4839 			pr_cont(" %s%d", first ? "idle: " : "",
4840 				task_pid_nr(worker->task));
4841 			first = false;
4842 		}
4843 		pr_cont("\n");
4844 	next_pool:
4845 		raw_spin_unlock_irqrestore(&pool->lock, flags);
4846 		/*
4847 		 * We could be printing a lot from atomic context, e.g.
4848 		 * sysrq-t -> show_workqueue_state(). Avoid triggering
4849 		 * hard lockup.
4850 		 */
4851 		touch_nmi_watchdog();
4852 	}
4853 
4854 	rcu_read_unlock();
4855 }
4856 
4857 /* used to show worker information through /proc/PID/{comm,stat,status} */
4858 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4859 {
4860 	int off;
4861 
4862 	/* always show the actual comm */
4863 	off = strscpy(buf, task->comm, size);
4864 	if (off < 0)
4865 		return;
4866 
4867 	/* stabilize PF_WQ_WORKER and worker pool association */
4868 	mutex_lock(&wq_pool_attach_mutex);
4869 
4870 	if (task->flags & PF_WQ_WORKER) {
4871 		struct worker *worker = kthread_data(task);
4872 		struct worker_pool *pool = worker->pool;
4873 
4874 		if (pool) {
4875 			raw_spin_lock_irq(&pool->lock);
4876 			/*
4877 			 * ->desc tracks information (wq name or
4878 			 * set_worker_desc()) for the latest execution.  If
4879 			 * current, prepend '+', otherwise '-'.
4880 			 */
4881 			if (worker->desc[0] != '\0') {
4882 				if (worker->current_work)
4883 					scnprintf(buf + off, size - off, "+%s",
4884 						  worker->desc);
4885 				else
4886 					scnprintf(buf + off, size - off, "-%s",
4887 						  worker->desc);
4888 			}
4889 			raw_spin_unlock_irq(&pool->lock);
4890 		}
4891 	}
4892 
4893 	mutex_unlock(&wq_pool_attach_mutex);
4894 }
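
/*
 * Illustrative sketch (not part of mainline workqueue.c): a work function
 * can call set_worker_desc() to describe what it is working on;
 * wq_worker_comm() above then appends that description, prefixed with '+'
 * while the item is still running, to the kworker's /proc/PID/comm.
 * example_flush_fn() and the inode number are hypothetical.
 */
static void __maybe_unused example_flush_fn(struct work_struct *work)
{
	set_worker_desc("flushing inode %lu", 42UL);
	/* ... perform the actual flush ... */
}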
4895 
4896 #ifdef CONFIG_SMP
4897 
4898 /*
4899  * CPU hotplug.
4900  *
4901  * There are two challenges in supporting CPU hotplug.  Firstly, there
4902  * are a lot of assumptions on strong associations among work, pwq and
4903  * pool which make migrating pending and scheduled works very
4904  * difficult to implement without impacting hot paths.  Secondly,
4905  * worker pools serve a mix of short, long and very long running works, making
4906  * blocked draining impractical.
4907  *
4908  * This is solved by allowing the pools to be disassociated from the CPU,
4909  * running as unbound ones, and allowing them to be reattached later if the
4910  * CPU comes back online.
4911  */
4912 
4913 static void unbind_workers(int cpu)
4914 {
4915 	struct worker_pool *pool;
4916 	struct worker *worker;
4917 
4918 	for_each_cpu_worker_pool(pool, cpu) {
4919 		mutex_lock(&wq_pool_attach_mutex);
4920 		raw_spin_lock_irq(&pool->lock);
4921 
4922 		/*
4923 		 * We've blocked all attach/detach operations. Make all workers
4924 		 * unbound and set DISASSOCIATED.  Before this, all workers
4925 		 * except for the ones which are still executing works from
4926 		 * before the last CPU down must be on the cpu.  After
4927 		 * this, they may become diasporas.
4928 		 */
4929 		for_each_pool_worker(worker, pool)
4930 			worker->flags |= WORKER_UNBOUND;
4931 
4932 		pool->flags |= POOL_DISASSOCIATED;
4933 
4934 		raw_spin_unlock_irq(&pool->lock);
4935 		mutex_unlock(&wq_pool_attach_mutex);
4936 
4937 		/*
4938 		 * Call schedule() so that we cross rq->lock and thus can
4939 		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4940 		 * This is necessary as scheduler callbacks may be invoked
4941 		 * from other cpus.
4942 		 */
4943 		schedule();
4944 
4945 		/*
4946 		 * Sched callbacks are disabled now.  Zap nr_running.
4947 		 * After this, nr_running stays zero and need_more_worker()
4948 		 * and keep_working() are always true as long as the
4949 		 * worklist is not empty.  This pool now behaves as an
4950 		 * unbound (in terms of concurrency management) pool which
4951 		 * is served by workers tied to the pool.
4952 		 */
4953 		atomic_set(&pool->nr_running, 0);
4954 
4955 		/*
4956 		 * With concurrency management just turned off, a busy
4957 		 * worker blocking could lead to lengthy stalls.  Kick off
4958 		 * unbound chain execution of currently pending work items.
4959 		 */
4960 		raw_spin_lock_irq(&pool->lock);
4961 		wake_up_worker(pool);
4962 		raw_spin_unlock_irq(&pool->lock);
4963 	}
4964 }
4965 
4966 /**
4967  * rebind_workers - rebind all workers of a pool to the associated CPU
4968  * @pool: pool of interest
4969  *
4970  * @pool->cpu is coming online.  Rebind all workers to the CPU.
4971  */
4972 static void rebind_workers(struct worker_pool *pool)
4973 {
4974 	struct worker *worker;
4975 
4976 	lockdep_assert_held(&wq_pool_attach_mutex);
4977 
4978 	/*
4979 	 * Restore CPU affinity of all workers.  As all idle workers should
4980 	 * be on the run-queue of the associated CPU before any local
4981 	 * wake-ups for concurrency management happen, restore CPU affinity
4982 	 * of all workers first and then clear UNBOUND.  As we're called
4983 	 * from CPU_ONLINE, the following shouldn't fail.
4984 	 */
4985 	for_each_pool_worker(worker, pool)
4986 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4987 						  pool->attrs->cpumask) < 0);
4988 
4989 	raw_spin_lock_irq(&pool->lock);
4990 
4991 	pool->flags &= ~POOL_DISASSOCIATED;
4992 
4993 	for_each_pool_worker(worker, pool) {
4994 		unsigned int worker_flags = worker->flags;
4995 
4996 		/*
4997 		 * A bound idle worker should actually be on the runqueue
4998 		 * of the associated CPU for local wake-ups targeting it to
4999 		 * work.  Kick all idle workers so that they migrate to the
5000 		 * associated CPU.  Doing this in the same loop as
5001 		 * replacing UNBOUND with REBOUND is safe as no worker will
5002 		 * be bound before @pool->lock is released.
5003 		 */
5004 		if (worker_flags & WORKER_IDLE)
5005 			wake_up_process(worker->task);
5006 
5007 		/*
5008 		 * We want to clear UNBOUND but can't directly call
5009 		 * worker_clr_flags() or adjust nr_running.  Atomically
5010 		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5011 		 * @worker will clear REBOUND using worker_clr_flags() when
5012 		 * it initiates the next execution cycle thus restoring
5013 		 * concurrency management.  Note that when or whether
5014 		 * @worker clears REBOUND doesn't affect correctness.
5015 		 *
5016 		 * WRITE_ONCE() is necessary because @worker->flags may be
5017 		 * tested without holding any lock in
5018 		 * wq_worker_running().  Without it, NOT_RUNNING test may
5019 		 * fail incorrectly leading to premature concurrency
5020 		 * management operations.
5021 		 */
5022 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5023 		worker_flags |= WORKER_REBOUND;
5024 		worker_flags &= ~WORKER_UNBOUND;
5025 		WRITE_ONCE(worker->flags, worker_flags);
5026 	}
5027 
5028 	raw_spin_unlock_irq(&pool->lock);
5029 }
5030 
5031 /**
5032  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5033  * @pool: unbound pool of interest
5034  * @cpu: the CPU which is coming up
5035  *
5036  * An unbound pool may end up with a cpumask which doesn't have any online
5037  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
5038  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
5039  * online CPU before, cpus_allowed of all its workers should be restored.
5040  */
5041 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5042 {
5043 	static cpumask_t cpumask;
5044 	struct worker *worker;
5045 
5046 	lockdep_assert_held(&wq_pool_attach_mutex);
5047 
5048 	/* is @cpu allowed for @pool? */
5049 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5050 		return;
5051 
5052 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5053 
5054 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
5055 	for_each_pool_worker(worker, pool)
5056 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5057 }
5058 
5059 int workqueue_prepare_cpu(unsigned int cpu)
5060 {
5061 	struct worker_pool *pool;
5062 
5063 	for_each_cpu_worker_pool(pool, cpu) {
5064 		if (pool->nr_workers)
5065 			continue;
5066 		if (!create_worker(pool))
5067 			return -ENOMEM;
5068 	}
5069 	return 0;
5070 }
5071 
5072 int workqueue_online_cpu(unsigned int cpu)
5073 {
5074 	struct worker_pool *pool;
5075 	struct workqueue_struct *wq;
5076 	int pi;
5077 
5078 	mutex_lock(&wq_pool_mutex);
5079 
5080 	for_each_pool(pool, pi) {
5081 		mutex_lock(&wq_pool_attach_mutex);
5082 
5083 		if (pool->cpu == cpu)
5084 			rebind_workers(pool);
5085 		else if (pool->cpu < 0)
5086 			restore_unbound_workers_cpumask(pool, cpu);
5087 
5088 		mutex_unlock(&wq_pool_attach_mutex);
5089 	}
5090 
5091 	/* update NUMA affinity of unbound workqueues */
5092 	list_for_each_entry(wq, &workqueues, list)
5093 		wq_update_unbound_numa(wq, cpu, true);
5094 
5095 	mutex_unlock(&wq_pool_mutex);
5096 	return 0;
5097 }
5098 
5099 int workqueue_offline_cpu(unsigned int cpu)
5100 {
5101 	struct workqueue_struct *wq;
5102 
5103 	/* unbinding per-cpu workers should happen on the local CPU */
5104 	if (WARN_ON(cpu != smp_processor_id()))
5105 		return -1;
5106 
5107 	unbind_workers(cpu);
5108 
5109 	/* update NUMA affinity of unbound workqueues */
5110 	mutex_lock(&wq_pool_mutex);
5111 	list_for_each_entry(wq, &workqueues, list)
5112 		wq_update_unbound_numa(wq, cpu, false);
5113 	mutex_unlock(&wq_pool_mutex);
5114 
5115 	return 0;
5116 }
5117 
5118 struct work_for_cpu {
5119 	struct work_struct work;
5120 	long (*fn)(void *);
5121 	void *arg;
5122 	long ret;
5123 };
5124 
5125 static void work_for_cpu_fn(struct work_struct *work)
5126 {
5127 	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5128 
5129 	wfc->ret = wfc->fn(wfc->arg);
5130 }
5131 
5132 /**
5133  * work_on_cpu - run a function in thread context on a particular cpu
5134  * @cpu: the cpu to run on
5135  * @fn: the function to run
5136  * @arg: the function arg
5137  *
5138  * It is up to the caller to ensure that the cpu doesn't go offline.
5139  * The caller must not hold any locks which would prevent @fn from completing.
5140  *
5141  * Return: The value @fn returns.
5142  */
5143 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5144 {
5145 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5146 
5147 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5148 	schedule_work_on(cpu, &wfc.work);
5149 	flush_work(&wfc.work);
5150 	destroy_work_on_stack(&wfc.work);
5151 	return wfc.ret;
5152 }
5153 EXPORT_SYMBOL_GPL(work_on_cpu);
5154 
5155 /**
5156  * work_on_cpu_safe - run a function in thread context on a particular cpu
5157  * @cpu: the cpu to run on
5158  * @fn:  the function to run
5159  * @arg: the function argument
5160  *
5161  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5162  * any locks which would prevent @fn from completing.
5163  *
5164  * Return: The value @fn returns.
5165  */
5166 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5167 {
5168 	long ret = -ENODEV;
5169 
5170 	get_online_cpus();
5171 	if (cpu_online(cpu))
5172 		ret = work_on_cpu(cpu, fn, arg);
5173 	put_online_cpus();
5174 	return ret;
5175 }
5176 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
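
/*
 * Illustrative sketch (not part of mainline workqueue.c): read a per-cpu
 * property from process context on a specific CPU.  work_on_cpu_safe()
 * handles CPU hotplug; with plain work_on_cpu() the caller would have to
 * keep the CPU online itself.  example_node_of_cpu() and
 * example_query_node() are hypothetical.
 */
static long example_node_of_cpu(void *arg)
{
	/* runs in a kworker bound to the requested CPU */
	return cpu_to_node(raw_smp_processor_id());
}

static long __maybe_unused example_query_node(int cpu)
{
	return work_on_cpu_safe(cpu, example_node_of_cpu, NULL);
}
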
5177 #endif /* CONFIG_SMP */
5178 
5179 #ifdef CONFIG_FREEZER
5180 
5181 /**
5182  * freeze_workqueues_begin - begin freezing workqueues
5183  *
5184  * Start freezing workqueues.  After this function returns, all freezable
5185  * workqueues will queue new works to their inactive_works list instead of
5186  * pool->worklist.
5187  *
5188  * CONTEXT:
5189  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5190  */
5191 void freeze_workqueues_begin(void)
5192 {
5193 	struct workqueue_struct *wq;
5194 	struct pool_workqueue *pwq;
5195 
5196 	mutex_lock(&wq_pool_mutex);
5197 
5198 	WARN_ON_ONCE(workqueue_freezing);
5199 	workqueue_freezing = true;
5200 
5201 	list_for_each_entry(wq, &workqueues, list) {
5202 		mutex_lock(&wq->mutex);
5203 		for_each_pwq(pwq, wq)
5204 			pwq_adjust_max_active(pwq);
5205 		mutex_unlock(&wq->mutex);
5206 	}
5207 
5208 	mutex_unlock(&wq_pool_mutex);
5209 }
5210 
5211 /**
5212  * freeze_workqueues_busy - are freezable workqueues still busy?
5213  *
5214  * Check whether freezing is complete.  This function must be called
5215  * between freeze_workqueues_begin() and thaw_workqueues().
5216  *
5217  * CONTEXT:
5218  * Grabs and releases wq_pool_mutex.
5219  *
5220  * Return:
5221  * %true if some freezable workqueues are still busy.  %false if freezing
5222  * is complete.
5223  */
5224 bool freeze_workqueues_busy(void)
5225 {
5226 	bool busy = false;
5227 	struct workqueue_struct *wq;
5228 	struct pool_workqueue *pwq;
5229 
5230 	mutex_lock(&wq_pool_mutex);
5231 
5232 	WARN_ON_ONCE(!workqueue_freezing);
5233 
5234 	list_for_each_entry(wq, &workqueues, list) {
5235 		if (!(wq->flags & WQ_FREEZABLE))
5236 			continue;
5237 		/*
5238 		 * nr_active is monotonically decreasing.  It's safe
5239 		 * to peek without lock.
5240 		 */
5241 		rcu_read_lock();
5242 		for_each_pwq(pwq, wq) {
5243 			WARN_ON_ONCE(pwq->nr_active < 0);
5244 			if (pwq->nr_active) {
5245 				busy = true;
5246 				rcu_read_unlock();
5247 				goto out_unlock;
5248 			}
5249 		}
5250 		rcu_read_unlock();
5251 	}
5252 out_unlock:
5253 	mutex_unlock(&wq_pool_mutex);
5254 	return busy;
5255 }
5256 
5257 /**
5258  * thaw_workqueues - thaw workqueues
5259  *
5260  * Thaw workqueues.  Normal queueing is restored and all collected
5261  * frozen works are transferred to their respective pool worklists.
5262  *
5263  * CONTEXT:
5264  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5265  */
5266 void thaw_workqueues(void)
5267 {
5268 	struct workqueue_struct *wq;
5269 	struct pool_workqueue *pwq;
5270 
5271 	mutex_lock(&wq_pool_mutex);
5272 
5273 	if (!workqueue_freezing)
5274 		goto out_unlock;
5275 
5276 	workqueue_freezing = false;
5277 
5278 	/* restore max_active and repopulate worklist */
5279 	list_for_each_entry(wq, &workqueues, list) {
5280 		mutex_lock(&wq->mutex);
5281 		for_each_pwq(pwq, wq)
5282 			pwq_adjust_max_active(pwq);
5283 		mutex_unlock(&wq->mutex);
5284 	}
5285 
5286 out_unlock:
5287 	mutex_unlock(&wq_pool_mutex);
5288 }
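
/*
 * Illustrative sketch (not part of mainline workqueue.c) of the freeze
 * protocol used around system suspend: start freezing, poll until no
 * freezable workqueue has in-flight work and thaw again if that doesn't
 * happen within a bound.  example_freeze_wqs() and the 10 second budget
 * are hypothetical.
 */
static int __maybe_unused example_freeze_wqs(void)
{
	unsigned long end = jiffies + 10 * HZ;

	freeze_workqueues_begin();

	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, end)) {
			thaw_workqueues();
			return -EBUSY;
		}
		schedule_timeout_uninterruptible(HZ / 100);
	}
	return 0;
}
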
5289 #endif /* CONFIG_FREEZER */
5290 
5291 static int workqueue_apply_unbound_cpumask(void)
5292 {
5293 	LIST_HEAD(ctxs);
5294 	int ret = 0;
5295 	struct workqueue_struct *wq;
5296 	struct apply_wqattrs_ctx *ctx, *n;
5297 
5298 	lockdep_assert_held(&wq_pool_mutex);
5299 
5300 	list_for_each_entry(wq, &workqueues, list) {
5301 		if (!(wq->flags & WQ_UNBOUND))
5302 			continue;
5303 		/* creating multiple pwqs breaks ordering guarantee */
5304 		if (wq->flags & __WQ_ORDERED)
5305 			continue;
5306 
5307 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5308 		if (!ctx) {
5309 			ret = -ENOMEM;
5310 			break;
5311 		}
5312 
5313 		list_add_tail(&ctx->list, &ctxs);
5314 	}
5315 
5316 	list_for_each_entry_safe(ctx, n, &ctxs, list) {
5317 		if (!ret)
5318 			apply_wqattrs_commit(ctx);
5319 		apply_wqattrs_cleanup(ctx);
5320 	}
5321 
5322 	return ret;
5323 }
5324 
5325 /**
5326  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5327  *  @cpumask: the cpumask to set
5328  *
5329  *  The low-level workqueues cpumask is a global cpumask that limits
5330  *  the affinity of all unbound workqueues.  This function checks @cpumask,
5331  *  applies it to all unbound workqueues and updates all of their pwqs.
5332  *
5333  *  Return:	0	- Success
5334  *  		-EINVAL	- Invalid @cpumask
5335  *  		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
5336  */
5337 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5338 {
5339 	int ret = -EINVAL;
5340 	cpumask_var_t saved_cpumask;
5341 
5342 	/*
5343 	 * Not excluding isolated cpus on purpose.
5344 	 * If the user wishes to include them, we allow that.
5345 	 */
5346 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
5347 	if (!cpumask_empty(cpumask)) {
5348 		apply_wqattrs_lock();
5349 		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5350 			ret = 0;
5351 			goto out_unlock;
5352 		}
5353 
5354 		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
5355 			ret = -ENOMEM;
5356 			goto out_unlock;
5357 		}
5358 
5359 		/* save the old wq_unbound_cpumask. */
5360 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5361 
5362 		/* update wq_unbound_cpumask at first and apply it to wqs. */
5363 		cpumask_copy(wq_unbound_cpumask, cpumask);
5364 		ret = workqueue_apply_unbound_cpumask();
5365 
5366 		/* restore the wq_unbound_cpumask when failed. */
5367 		if (ret < 0)
5368 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5369 
5370 		free_cpumask_var(saved_cpumask);
5371 out_unlock:
5372 		apply_wqattrs_unlock();
5373 	}
5374 
5375 	return ret;
5376 }
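
/*
 * Illustrative sketch (not part of mainline workqueue.c): confine all
 * unbound workqueues to the first four CPUs.  Normally this mask is set
 * from userland through /sys/devices/virtual/workqueue/cpumask (see
 * wq_unbound_cpumask_store() below); example_restrict_unbound() is
 * hypothetical.
 */
static int __maybe_unused example_restrict_unbound(void)
{
	cpumask_var_t mask;
	int cpu, ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		if (cpu < 4)
			cpumask_set_cpu(cpu, mask);

	ret = workqueue_set_unbound_cpumask(mask);
	free_cpumask_var(mask);
	return ret;
}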
5377 
5378 #ifdef CONFIG_SYSFS
5379 /*
5380  * Workqueues with the WQ_SYSFS flag set are visible to userland via
5381  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5382  * following attributes.
5383  *
5384  *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
5385  *  max_active	RW int	: maximum number of in-flight work items
5386  *
5387  * Unbound workqueues have the following extra attributes.
5388  *
5389  *  pool_ids	RO int	: the associated pool IDs for each node
5390  *  nice	RW int	: nice value of the workers
5391  *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
5392  *  numa	RW bool	: whether to enable NUMA affinity
5393  */
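
/*
 * Illustrative sketch (not part of mainline workqueue.c): an unbound
 * workqueue allocated with WQ_SYSFS shows up under
 * /sys/bus/workqueue/devices/ with the attributes listed above;
 * "example_sysfs_wq" is a hypothetical name.
 */
static struct workqueue_struct * __maybe_unused example_sysfs_wq_create(void)
{
	return alloc_workqueue("example_sysfs_wq", WQ_UNBOUND | WQ_SYSFS, 0);
}
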
5394 struct wq_device {
5395 	struct workqueue_struct		*wq;
5396 	struct device			dev;
5397 };
5398 
5399 static struct workqueue_struct *dev_to_wq(struct device *dev)
5400 {
5401 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5402 
5403 	return wq_dev->wq;
5404 }
5405 
5406 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5407 			    char *buf)
5408 {
5409 	struct workqueue_struct *wq = dev_to_wq(dev);
5410 
5411 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5412 }
5413 static DEVICE_ATTR_RO(per_cpu);
5414 
5415 static ssize_t max_active_show(struct device *dev,
5416 			       struct device_attribute *attr, char *buf)
5417 {
5418 	struct workqueue_struct *wq = dev_to_wq(dev);
5419 
5420 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5421 }
5422 
5423 static ssize_t max_active_store(struct device *dev,
5424 				struct device_attribute *attr, const char *buf,
5425 				size_t count)
5426 {
5427 	struct workqueue_struct *wq = dev_to_wq(dev);
5428 	int val;
5429 
5430 	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5431 		return -EINVAL;
5432 
5433 	workqueue_set_max_active(wq, val);
5434 	return count;
5435 }
5436 static DEVICE_ATTR_RW(max_active);
5437 
5438 static struct attribute *wq_sysfs_attrs[] = {
5439 	&dev_attr_per_cpu.attr,
5440 	&dev_attr_max_active.attr,
5441 	NULL,
5442 };
5443 ATTRIBUTE_GROUPS(wq_sysfs);
5444 
5445 static ssize_t wq_pool_ids_show(struct device *dev,
5446 				struct device_attribute *attr, char *buf)
5447 {
5448 	struct workqueue_struct *wq = dev_to_wq(dev);
5449 	const char *delim = "";
5450 	int node, written = 0;
5451 
5452 	get_online_cpus();
5453 	rcu_read_lock();
5454 	for_each_node(node) {
5455 		written += scnprintf(buf + written, PAGE_SIZE - written,
5456 				     "%s%d:%d", delim, node,
5457 				     unbound_pwq_by_node(wq, node)->pool->id);
5458 		delim = " ";
5459 	}
5460 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5461 	rcu_read_unlock();
5462 	put_online_cpus();
5463 
5464 	return written;
5465 }
5466 
5467 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5468 			    char *buf)
5469 {
5470 	struct workqueue_struct *wq = dev_to_wq(dev);
5471 	int written;
5472 
5473 	mutex_lock(&wq->mutex);
5474 	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5475 	mutex_unlock(&wq->mutex);
5476 
5477 	return written;
5478 }
5479 
5480 /* prepare workqueue_attrs for sysfs store operations */
5481 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5482 {
5483 	struct workqueue_attrs *attrs;
5484 
5485 	lockdep_assert_held(&wq_pool_mutex);
5486 
5487 	attrs = alloc_workqueue_attrs();
5488 	if (!attrs)
5489 		return NULL;
5490 
5491 	copy_workqueue_attrs(attrs, wq->unbound_attrs);
5492 	return attrs;
5493 }
5494 
5495 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5496 			     const char *buf, size_t count)
5497 {
5498 	struct workqueue_struct *wq = dev_to_wq(dev);
5499 	struct workqueue_attrs *attrs;
5500 	int ret = -ENOMEM;
5501 
5502 	apply_wqattrs_lock();
5503 
5504 	attrs = wq_sysfs_prep_attrs(wq);
5505 	if (!attrs)
5506 		goto out_unlock;
5507 
5508 	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5509 	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5510 		ret = apply_workqueue_attrs_locked(wq, attrs);
5511 	else
5512 		ret = -EINVAL;
5513 
5514 out_unlock:
5515 	apply_wqattrs_unlock();
5516 	free_workqueue_attrs(attrs);
5517 	return ret ?: count;
5518 }
5519 
5520 static ssize_t wq_cpumask_show(struct device *dev,
5521 			       struct device_attribute *attr, char *buf)
5522 {
5523 	struct workqueue_struct *wq = dev_to_wq(dev);
5524 	int written;
5525 
5526 	mutex_lock(&wq->mutex);
5527 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5528 			    cpumask_pr_args(wq->unbound_attrs->cpumask));
5529 	mutex_unlock(&wq->mutex);
5530 	return written;
5531 }
5532 
5533 static ssize_t wq_cpumask_store(struct device *dev,
5534 				struct device_attribute *attr,
5535 				const char *buf, size_t count)
5536 {
5537 	struct workqueue_struct *wq = dev_to_wq(dev);
5538 	struct workqueue_attrs *attrs;
5539 	int ret = -ENOMEM;
5540 
5541 	apply_wqattrs_lock();
5542 
5543 	attrs = wq_sysfs_prep_attrs(wq);
5544 	if (!attrs)
5545 		goto out_unlock;
5546 
5547 	ret = cpumask_parse(buf, attrs->cpumask);
5548 	if (!ret)
5549 		ret = apply_workqueue_attrs_locked(wq, attrs);
5550 
5551 out_unlock:
5552 	apply_wqattrs_unlock();
5553 	free_workqueue_attrs(attrs);
5554 	return ret ?: count;
5555 }
5556 
5557 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5558 			    char *buf)
5559 {
5560 	struct workqueue_struct *wq = dev_to_wq(dev);
5561 	int written;
5562 
5563 	mutex_lock(&wq->mutex);
5564 	written = scnprintf(buf, PAGE_SIZE, "%d\n",
5565 			    !wq->unbound_attrs->no_numa);
5566 	mutex_unlock(&wq->mutex);
5567 
5568 	return written;
5569 }
5570 
5571 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5572 			     const char *buf, size_t count)
5573 {
5574 	struct workqueue_struct *wq = dev_to_wq(dev);
5575 	struct workqueue_attrs *attrs;
5576 	int v, ret = -ENOMEM;
5577 
5578 	apply_wqattrs_lock();
5579 
5580 	attrs = wq_sysfs_prep_attrs(wq);
5581 	if (!attrs)
5582 		goto out_unlock;
5583 
5584 	ret = -EINVAL;
5585 	if (sscanf(buf, "%d", &v) == 1) {
5586 		attrs->no_numa = !v;
5587 		ret = apply_workqueue_attrs_locked(wq, attrs);
5588 	}
5589 
5590 out_unlock:
5591 	apply_wqattrs_unlock();
5592 	free_workqueue_attrs(attrs);
5593 	return ret ?: count;
5594 }
5595 
5596 static struct device_attribute wq_sysfs_unbound_attrs[] = {
5597 	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5598 	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5599 	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5600 	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5601 	__ATTR_NULL,
5602 };
5603 
5604 static struct bus_type wq_subsys = {
5605 	.name				= "workqueue",
5606 	.dev_groups			= wq_sysfs_groups,
5607 };
5608 
5609 static ssize_t wq_unbound_cpumask_show(struct device *dev,
5610 		struct device_attribute *attr, char *buf)
5611 {
5612 	int written;
5613 
5614 	mutex_lock(&wq_pool_mutex);
5615 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5616 			    cpumask_pr_args(wq_unbound_cpumask));
5617 	mutex_unlock(&wq_pool_mutex);
5618 
5619 	return written;
5620 }
5621 
5622 static ssize_t wq_unbound_cpumask_store(struct device *dev,
5623 		struct device_attribute *attr, const char *buf, size_t count)
5624 {
5625 	cpumask_var_t cpumask;
5626 	int ret;
5627 
5628 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5629 		return -ENOMEM;
5630 
5631 	ret = cpumask_parse(buf, cpumask);
5632 	if (!ret)
5633 		ret = workqueue_set_unbound_cpumask(cpumask);
5634 
5635 	free_cpumask_var(cpumask);
5636 	return ret ? ret : count;
5637 }
5638 
5639 static struct device_attribute wq_sysfs_cpumask_attr =
5640 	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5641 	       wq_unbound_cpumask_store);
5642 
5643 static int __init wq_sysfs_init(void)
5644 {
5645 	int err;
5646 
5647 	err = subsys_virtual_register(&wq_subsys, NULL);
5648 	if (err)
5649 		return err;
5650 
5651 	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5652 }
5653 core_initcall(wq_sysfs_init);
5654 
5655 static void wq_device_release(struct device *dev)
5656 {
5657 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5658 
5659 	kfree(wq_dev);
5660 }
5661 
5662 /**
5663  * workqueue_sysfs_register - make a workqueue visible in sysfs
5664  * @wq: the workqueue to register
5665  *
5666  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5667  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5668  * which is the preferred method.
5669  *
5670  * A workqueue user should use this function directly iff it wants to apply
5671  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5672  * apply_workqueue_attrs() may race against userland updating the
5673  * attributes.
5674  *
5675  * Return: 0 on success, -errno on failure.
5676  */
5677 int workqueue_sysfs_register(struct workqueue_struct *wq)
5678 {
5679 	struct wq_device *wq_dev;
5680 	int ret;
5681 
5682 	/*
5683 	 * Adjusting max_active or creating new pwqs by applying
5684 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
5685 	 * workqueues.
5686 	 */
5687 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5688 		return -EINVAL;
5689 
5690 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5691 	if (!wq_dev)
5692 		return -ENOMEM;
5693 
5694 	wq_dev->wq = wq;
5695 	wq_dev->dev.bus = &wq_subsys;
5696 	wq_dev->dev.release = wq_device_release;
5697 	dev_set_name(&wq_dev->dev, "%s", wq->name);
5698 
5699 	/*
5700 	 * unbound_attrs are created separately.  Suppress uevent until
5701 	 * everything is ready.
5702 	 */
5703 	dev_set_uevent_suppress(&wq_dev->dev, true);
5704 
5705 	ret = device_register(&wq_dev->dev);
5706 	if (ret) {
5707 		put_device(&wq_dev->dev);
5708 		wq->wq_dev = NULL;
5709 		return ret;
5710 	}
5711 
5712 	if (wq->flags & WQ_UNBOUND) {
5713 		struct device_attribute *attr;
5714 
5715 		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5716 			ret = device_create_file(&wq_dev->dev, attr);
5717 			if (ret) {
5718 				device_unregister(&wq_dev->dev);
5719 				wq->wq_dev = NULL;
5720 				return ret;
5721 			}
5722 		}
5723 	}
5724 
5725 	dev_set_uevent_suppress(&wq_dev->dev, false);
5726 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5727 	return 0;
5728 }
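
/*
 * Illustrative sketch (not part of mainline workqueue.c): apply attributes
 * to an unbound workqueue before exposing it in sysfs, the case described
 * in the comment above.  Passing WQ_SYSFS to alloc_workqueue() would
 * register the workqueue immediately and could race with userland writes
 * to the attributes.  example_create_tuned_wq() and the nice value are
 * hypothetical.
 */
static struct workqueue_struct * __maybe_unused example_create_tuned_wq(void)
{
	struct workqueue_struct *wq;
	struct workqueue_attrs *attrs;

	wq = alloc_workqueue("example_tuned_wq", WQ_UNBOUND, 0);
	if (!wq)
		return NULL;

	attrs = alloc_workqueue_attrs();
	if (attrs) {
		attrs->nice = -5;	/* run the workers at elevated priority */
		if (apply_workqueue_attrs(wq, attrs))
			pr_warn("example_tuned_wq: failed to apply attrs\n");
		free_workqueue_attrs(attrs);
	}

	if (workqueue_sysfs_register(wq)) {
		destroy_workqueue(wq);
		return NULL;
	}
	return wq;
}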
5729 
5730 /**
5731  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5732  * @wq: the workqueue to unregister
5733  *
5734  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5735  */
5736 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5737 {
5738 	struct wq_device *wq_dev = wq->wq_dev;
5739 
5740 	if (!wq->wq_dev)
5741 		return;
5742 
5743 	wq->wq_dev = NULL;
5744 	device_unregister(&wq_dev->dev);
5745 }
5746 #else	/* CONFIG_SYSFS */
5747 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
5748 #endif	/* CONFIG_SYSFS */
5749 
5750 /*
5751  * Workqueue watchdog.
5752  *
5753  * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
5754  * illegal flush dependency, or a concurrency-managed work item which stays
5755  * RUNNING indefinitely.  Workqueue stalls can be very difficult to debug as
5756  * the usual warning mechanisms don't trigger and internal workqueue state
5757  * is largely opaque.
5758  *
5759  * Workqueue watchdog monitors all worker pools periodically and dumps
5760  * state if some pools fail to make forward progress for a while, where
5761  * forward progress is defined as the first item on ->worklist changing.
5762  *
5763  * This mechanism is controlled through the kernel parameter
5764  * "workqueue.watchdog_thresh" which can be updated at runtime through the
5765  * corresponding sysfs parameter file.
5766  */
5767 #ifdef CONFIG_WQ_WATCHDOG
5768 
5769 static unsigned long wq_watchdog_thresh = 30;
5770 static struct timer_list wq_watchdog_timer;
5771 
5772 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5773 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5774 
5775 static void wq_watchdog_reset_touched(void)
5776 {
5777 	int cpu;
5778 
5779 	wq_watchdog_touched = jiffies;
5780 	for_each_possible_cpu(cpu)
5781 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5782 }
5783 
5784 static void wq_watchdog_timer_fn(struct timer_list *unused)
5785 {
5786 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5787 	bool lockup_detected = false;
5788 	unsigned long now = jiffies;
5789 	struct worker_pool *pool;
5790 	int pi;
5791 
5792 	if (!thresh)
5793 		return;
5794 
5795 	rcu_read_lock();
5796 
5797 	for_each_pool(pool, pi) {
5798 		unsigned long pool_ts, touched, ts;
5799 
5800 		if (list_empty(&pool->worklist))
5801 			continue;
5802 
5803 		/*
5804 		 * If a virtual machine is stopped by the host it can look to
5805 		 * the watchdog like a stall.
5806 		 */
5807 		kvm_check_and_clear_guest_paused();
5808 
5809 		/* get the latest of pool and touched timestamps */
5810 		pool_ts = READ_ONCE(pool->watchdog_ts);
5811 		touched = READ_ONCE(wq_watchdog_touched);
5812 
5813 		if (time_after(pool_ts, touched))
5814 			ts = pool_ts;
5815 		else
5816 			ts = touched;
5817 
5818 		if (pool->cpu >= 0) {
5819 			unsigned long cpu_touched =
5820 				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5821 						  pool->cpu));
5822 			if (time_after(cpu_touched, ts))
5823 				ts = cpu_touched;
5824 		}
5825 
5826 		/* did we stall? */
5827 		if (time_after(now, ts + thresh)) {
5828 			lockup_detected = true;
5829 			pr_emerg("BUG: workqueue lockup - pool");
5830 			pr_cont_pool_info(pool);
5831 			pr_cont(" stuck for %us!\n",
5832 				jiffies_to_msecs(now - pool_ts) / 1000);
5833 		}
5834 	}
5835 
5836 	rcu_read_unlock();
5837 
5838 	if (lockup_detected)
5839 		show_workqueue_state();
5840 
5841 	wq_watchdog_reset_touched();
5842 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
5843 }
5844 
5845 notrace void wq_watchdog_touch(int cpu)
5846 {
5847 	if (cpu >= 0)
5848 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5849 	else
5850 		wq_watchdog_touched = jiffies;
5851 }
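
/*
 * Illustrative sketch (not part of mainline workqueue.c): code that
 * legitimately monopolizes a CPU for a long time can keep the per-cpu
 * watchdog timestamp fresh so that pools bound to that CPU aren't reported
 * as stalled; touch_softlockup_watchdog() does this as part of its work.
 * example_busy_poll() is hypothetical.
 */
static void __maybe_unused example_busy_poll(bool (*done)(void))
{
	while (!done()) {
		wq_watchdog_touch(raw_smp_processor_id());
		cpu_relax();
	}
}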
5852 
5853 static void wq_watchdog_set_thresh(unsigned long thresh)
5854 {
5855 	wq_watchdog_thresh = 0;
5856 	del_timer_sync(&wq_watchdog_timer);
5857 
5858 	if (thresh) {
5859 		wq_watchdog_thresh = thresh;
5860 		wq_watchdog_reset_touched();
5861 		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5862 	}
5863 }
5864 
5865 static int wq_watchdog_param_set_thresh(const char *val,
5866 					const struct kernel_param *kp)
5867 {
5868 	unsigned long thresh;
5869 	int ret;
5870 
5871 	ret = kstrtoul(val, 0, &thresh);
5872 	if (ret)
5873 		return ret;
5874 
5875 	if (system_wq)
5876 		wq_watchdog_set_thresh(thresh);
5877 	else
5878 		wq_watchdog_thresh = thresh;
5879 
5880 	return 0;
5881 }
5882 
5883 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5884 	.set	= wq_watchdog_param_set_thresh,
5885 	.get	= param_get_ulong,
5886 };
5887 
5888 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5889 		0644);
5890 
5891 static void wq_watchdog_init(void)
5892 {
5893 	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5894 	wq_watchdog_set_thresh(wq_watchdog_thresh);
5895 }
5896 
5897 #else	/* CONFIG_WQ_WATCHDOG */
5898 
5899 static inline void wq_watchdog_init(void) { }
5900 
5901 #endif	/* CONFIG_WQ_WATCHDOG */
5902 
5903 static void __init wq_numa_init(void)
5904 {
5905 	cpumask_var_t *tbl;
5906 	int node, cpu;
5907 
5908 	if (num_possible_nodes() <= 1)
5909 		return;
5910 
5911 	if (wq_disable_numa) {
5912 		pr_info("workqueue: NUMA affinity support disabled\n");
5913 		return;
5914 	}
5915 
5916 	for_each_possible_cpu(cpu) {
5917 		if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
5918 			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5919 			return;
5920 		}
5921 	}
5922 
5923 	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5924 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
5925 
5926 	/*
5927 	 * We want masks of possible CPUs of each node which isn't readily
5928 	 * available.  Build one from cpu_to_node() which should have been
5929 	 * fully initialized by now.
5930 	 */
5931 	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5932 	BUG_ON(!tbl);
5933 
5934 	for_each_node(node)
5935 		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5936 				node_online(node) ? node : NUMA_NO_NODE));
5937 
5938 	for_each_possible_cpu(cpu) {
5939 		node = cpu_to_node(cpu);
5940 		cpumask_set_cpu(cpu, tbl[node]);
5941 	}
5942 
5943 	wq_numa_possible_cpumask = tbl;
5944 	wq_numa_enabled = true;
5945 }
5946 
5947 /**
5948  * workqueue_init_early - early init for workqueue subsystem
5949  *
5950  * This is the first half of two-staged workqueue subsystem initialization
5951  * and invoked as soon as the bare basics - memory allocation, cpumasks and
5952  * idr are up.  It sets up all the data structures and system workqueues
5953  * and allows early boot code to create workqueues and queue/cancel work
5954  * items.  Actual work item execution starts only after kthreads can be
5955  * created and scheduled right before early initcalls.
5956  */
5957 void __init workqueue_init_early(void)
5958 {
5959 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5960 	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5961 	int i, cpu;
5962 
5963 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5964 
5965 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5966 	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
5967 
5968 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5969 
5970 	/* initialize CPU pools */
5971 	for_each_possible_cpu(cpu) {
5972 		struct worker_pool *pool;
5973 
5974 		i = 0;
5975 		for_each_cpu_worker_pool(pool, cpu) {
5976 			BUG_ON(init_worker_pool(pool));
5977 			pool->cpu = cpu;
5978 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5979 			pool->attrs->nice = std_nice[i++];
5980 			pool->node = cpu_to_node(cpu);
5981 
5982 			/* alloc pool ID */
5983 			mutex_lock(&wq_pool_mutex);
5984 			BUG_ON(worker_pool_assign_id(pool));
5985 			mutex_unlock(&wq_pool_mutex);
5986 		}
5987 	}
5988 
5989 	/* create default unbound and ordered wq attrs */
5990 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5991 		struct workqueue_attrs *attrs;
5992 
5993 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
5994 		attrs->nice = std_nice[i];
5995 		unbound_std_wq_attrs[i] = attrs;
5996 
5997 		/*
5998 		 * An ordered wq should have only one pwq as ordering is
5999 		 * guaranteed by max_active which is enforced by pwqs.
6000 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
6001 		 */
6002 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
6003 		attrs->nice = std_nice[i];
6004 		attrs->no_numa = true;
6005 		ordered_wq_attrs[i] = attrs;
6006 	}
6007 
6008 	system_wq = alloc_workqueue("events", 0, 0);
6009 	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6010 	system_long_wq = alloc_workqueue("events_long", 0, 0);
6011 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6012 					    WQ_UNBOUND_MAX_ACTIVE);
6013 	system_freezable_wq = alloc_workqueue("events_freezable",
6014 					      WQ_FREEZABLE, 0);
6015 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6016 					      WQ_POWER_EFFICIENT, 0);
6017 	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6018 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6019 					      0);
6020 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6021 	       !system_unbound_wq || !system_freezable_wq ||
6022 	       !system_power_efficient_wq ||
6023 	       !system_freezable_power_efficient_wq);
6024 }
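
/*
 * Illustrative sketch (not part of mainline workqueue.c): once
 * workqueue_init_early() has run, boot code may already queue work items,
 * but the callback only executes after workqueue_init() has created the
 * initial kworkers.  example_early_fn() and example_early_queue() are
 * hypothetical.
 */
static void example_early_fn(struct work_struct *work)
{
	pr_info("executed once kworkers exist\n");
}

static DECLARE_WORK(example_early_work, example_early_fn);

static void __init __maybe_unused example_early_queue(void)
{
	schedule_work(&example_early_work);
}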
6025 
6026 /**
6027  * workqueue_init - bring workqueue subsystem fully online
6028  *
6029  * This is the latter half of two-staged workqueue subsystem initialization
6030  * and invoked as soon as kthreads can be created and scheduled.
6031  * Workqueues have been created and work items queued on them, but there
6032  * are no kworkers executing the work items yet.  Populate the worker pools
6033  * with the initial workers and enable future kworker creations.
6034  */
6035 void __init workqueue_init(void)
6036 {
6037 	struct workqueue_struct *wq;
6038 	struct worker_pool *pool;
6039 	int cpu, bkt;
6040 
6041 	/*
6042 	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
6043 	 * CPU to node mapping may not be available that early on some
6044 	 * archs such as power and arm64.  As per-cpu pools created
6045 	 * previously could be missing the node hint, and unbound pools their
6046 	 * NUMA affinity, fix them up.
6047 	 *
6048 	 * Also, while iterating workqueues, create rescuers if requested.
6049 	 */
6050 	wq_numa_init();
6051 
6052 	mutex_lock(&wq_pool_mutex);
6053 
6054 	for_each_possible_cpu(cpu) {
6055 		for_each_cpu_worker_pool(pool, cpu) {
6056 			pool->node = cpu_to_node(cpu);
6057 		}
6058 	}
6059 
6060 	list_for_each_entry(wq, &workqueues, list) {
6061 		wq_update_unbound_numa(wq, smp_processor_id(), true);
6062 		WARN(init_rescuer(wq),
6063 		     "workqueue: failed to create early rescuer for %s",
6064 		     wq->name);
6065 	}
6066 
6067 	mutex_unlock(&wq_pool_mutex);
6068 
6069 	/* create the initial workers */
6070 	for_each_online_cpu(cpu) {
6071 		for_each_cpu_worker_pool(pool, cpu) {
6072 			pool->flags &= ~POOL_DISASSOCIATED;
6073 			BUG_ON(!create_worker(pool));
6074 		}
6075 	}
6076 
6077 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6078 		BUG_ON(!create_worker(pool));
6079 
6080 	wq_online = true;
6081 	wq_watchdog_init();
6082 }
6083