1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/workqueue.c - generic async execution with shared worker pool
4  *
5  * Copyright (C) 2002		Ingo Molnar
6  *
7  *   Derived from the taskqueue/keventd code by:
8  *     David Woodhouse <dwmw2@infradead.org>
9  *     Andrew Morton
10  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
11  *     Theodore Ts'o <tytso@mit.edu>
12  *
13  * Made to use alloc_percpu by Christoph Lameter.
14  *
15  * Copyright (C) 2010		SUSE Linux Products GmbH
16  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
17  *
18  * This is the generic async execution mechanism.  Work items are
19  * executed in process context.  The worker pool is shared and
20  * automatically managed.  There are two worker pools for each CPU (one for
21  * normal work items and the other for high priority ones) and some extra
22  * pools for workqueues which are not bound to any specific CPU - the
23  * number of these backing pools is dynamic.
24  *
25  * Please read Documentation/core-api/workqueue.rst for details.
26  */
27 
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/sched/debug.h>
53 #include <linux/nmi.h>
54 #include <linux/kvm_para.h>
55 #include <linux/delay.h>
56 
57 #include "workqueue_internal.h"
58 
59 #include <trace/hooks/wqlockup.h>
60 /* events/workqueue.h uses default TRACE_INCLUDE_PATH */
61 #undef TRACE_INCLUDE_PATH
62 
63 enum {
64 	/*
65 	 * worker_pool flags
66 	 *
67 	 * A bound pool is either associated or disassociated with its CPU.
68 	 * While associated (!DISASSOCIATED), all workers are bound to the
69 	 * CPU and none has %WORKER_UNBOUND set and concurrency management
70 	 * is in effect.
71 	 *
72 	 * While DISASSOCIATED, the cpu may be offline and all workers have
73 	 * %WORKER_UNBOUND set and concurrency management disabled, and may
74 	 * be executing on any CPU.  The pool behaves as an unbound one.
75 	 *
76 	 * Note that DISASSOCIATED should be flipped only while holding
77 	 * wq_pool_attach_mutex to avoid changing binding state while
78 	 * worker_attach_to_pool() is in progress.
79 	 */
80 	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
81 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
82 
83 	/* worker flags */
84 	WORKER_DIE		= 1 << 1,	/* die die die */
85 	WORKER_IDLE		= 1 << 2,	/* is idle */
86 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
87 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
88 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
89 	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */
90 
91 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
92 				  WORKER_UNBOUND | WORKER_REBOUND,
93 
94 	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
95 
96 	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
97 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
98 
99 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
100 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
101 
102 	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
103 						/* call for help after 10ms
104 						   (min two ticks) */
105 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
106 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
107 
108 	/*
109 	 * Rescue workers are used only in emergencies and shared by
110 	 * all cpus.  Give MIN_NICE.
111 	 */
112 	RESCUER_NICE_LEVEL	= MIN_NICE,
113 	HIGHPRI_NICE_LEVEL	= MIN_NICE,
114 
115 	WQ_NAME_LEN		= 24,
116 };
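/*
 * A worked example of the MAYDAY_INITIAL_TIMEOUT arithmetic above, for a
 * few common HZ values (illustrative only):
 *
 *	HZ = 1000:  HZ / 100 = 10 jiffies -> first mayday after ~10ms
 *	HZ =  250:  HZ / 100 =  2 jiffies -> first mayday after ~8ms
 *	HZ =  100:  HZ / 100 =  1 < 2, clamped to 2 jiffies -> ~20ms
 *
 * After the initial timeout, maydays repeat every MAYDAY_INTERVAL (~100ms).
 */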
117 
118 /*
119  * Structure fields follow one of the following exclusion rules.
120  *
121  * I: Modifiable by initialization/destruction paths and read-only for
122  *    everyone else.
123  *
124  * P: Preemption protected.  Disabling preemption is enough and should
125  *    only be modified and accessed from the local cpu.
126  *
127  * L: pool->lock protected.  Access with pool->lock held.
128  *
129  * K: Only modified by worker while holding pool->lock. Can be safely read by
130  *    self, while holding pool->lock or from IRQ context if %current is the
131  *    kworker.
132  *
133  * S: Only modified by worker self.
134  *
135  * A: wq_pool_attach_mutex protected.
136  *
137  * PL: wq_pool_mutex protected.
138  *
139  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
140  *
141  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
142  *
143  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
144  *      RCU for reads.
145  *
146  * WQ: wq->mutex protected.
147  *
148  * WR: wq->mutex protected for writes.  RCU protected for reads.
149  *
150  * MD: wq_mayday_lock protected.
151  *
152  * WD: Used internally by the watchdog.
153  */
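/*
 * For example, a field annotated "PW:" below (such as wq->unbound_attrs)
 * may only be written with both wq_pool_mutex and wq->mutex held, but may
 * be read while holding either of the two; a "WR:" field needs wq->mutex
 * for writes while readers only need an RCU read-side critical section.
 */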
154 
155 /* struct worker is defined in workqueue_internal.h */
156 
157 struct worker_pool {
158 	raw_spinlock_t		lock;		/* the pool lock */
159 	int			cpu;		/* I: the associated cpu */
160 	int			node;		/* I: the associated node ID */
161 	int			id;		/* I: pool ID */
162 	unsigned int		flags;		/* L: flags */
163 
164 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
165 	bool			cpu_stall;	/* WD: stalled cpu bound pool */
166 
167 	/*
168 	 * The counter is incremented in a process context on the associated CPU
169 	 * w/ preemption disabled, and decremented or reset in the same context
170 	 * but w/ pool->lock held. The readers grab pool->lock and are
171 	 * guaranteed to see if the counter reached zero.
172 	 */
173 	int			nr_running;
174 
175 	struct list_head	worklist;	/* L: list of pending works */
176 
177 	int			nr_workers;	/* L: total number of workers */
178 	int			nr_idle;	/* L: currently idle workers */
179 
180 	struct list_head	idle_list;	/* L: list of idle workers */
181 	struct timer_list	idle_timer;	/* L: worker idle timeout */
182 	struct work_struct      idle_cull_work; /* L: worker idle cleanup */
183 
184 	struct timer_list	mayday_timer;	  /* L: SOS timer for workers */
185 
186 	/* a worker is either on busy_hash or idle_list, or the manager */
187 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
188 						/* L: hash of busy workers */
189 
190 	struct worker		*manager;	/* L: purely informational */
191 	struct list_head	workers;	/* A: attached workers */
192 	struct list_head        dying_workers;  /* A: workers about to die */
193 	struct completion	*detach_completion; /* all workers detached */
194 
195 	struct ida		worker_ida;	/* worker IDs for task name */
196 
197 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
198 	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
199 	int			refcnt;		/* PL: refcnt for unbound pools */
200 
201 	/*
202 	 * Destruction of pool is RCU protected to allow dereferences
203 	 * from get_work_pool().
204 	 */
205 	struct rcu_head		rcu;
206 };
207 
208 /*
209  * Per-pool_workqueue statistics. These can be monitored using
210  * tools/workqueue/wq_monitor.py.
211  */
212 enum pool_workqueue_stats {
213 	PWQ_STAT_STARTED,	/* work items started execution */
214 	PWQ_STAT_COMPLETED,	/* work items completed execution */
215 	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
216 	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
217 	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
218 	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
219 	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
220 	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */
221 
222 	PWQ_NR_STATS,
223 };
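/*
 * For example, the per-pwq counters above can typically be dumped with
 * something like the following (exact invocation may differ by version):
 *
 *	$ tools/workqueue/wq_monitor.py events
 *
 * which periodically prints the started/completed/CPU-time/CM-wakeup/
 * mayday/rescued counts for the "events" workqueue.
 */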
224 
225 /*
226  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
227  * of work_struct->data are used for flags and the remaining high bits
228  * point to the pwq; thus, pwqs need to be aligned at two's power of the
229  * number of flag bits.
230  */
231 struct pool_workqueue {
232 	struct worker_pool	*pool;		/* I: the associated pool */
233 	struct workqueue_struct *wq;		/* I: the owning workqueue */
234 	int			work_color;	/* L: current color */
235 	int			flush_color;	/* L: flushing color */
236 	int			refcnt;		/* L: reference count */
237 	int			nr_in_flight[WORK_NR_COLORS];
238 						/* L: nr of in_flight works */
239 
240 	/*
241 	 * nr_active management and WORK_STRUCT_INACTIVE:
242 	 *
243 	 * When pwq->nr_active >= max_active, new work item is queued to
244 	 * pwq->inactive_works instead of pool->worklist and marked with
245 	 * WORK_STRUCT_INACTIVE.
246 	 *
247 	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
248 	 * in pwq->nr_active and all work items in pwq->inactive_works are
249 	 * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
250 	 * work items are in pwq->inactive_works.  Some of them are ready to
251 	 * run in pool->worklist or worker->scheduled.  Those work items are
252 	 * only struct wq_barrier which is used for flush_work() and should
253 	 * not participate in pwq->nr_active.  For non-barrier work item, it
254 	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
255 	 */
256 	int			nr_active;	/* L: nr of active works */
257 	int			max_active;	/* L: max active works */
258 	struct list_head	inactive_works;	/* L: inactive works */
259 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
260 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
261 
262 	u64			stats[PWQ_NR_STATS];
263 
264 	/*
265 	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
266 	 * and pwq_release_workfn() for details. pool_workqueue itself is also
267 	 * RCU protected so that the first pwq can be determined without
268 	 * grabbing wq->mutex.
269 	 */
270 	struct kthread_work	release_work;
271 	struct rcu_head		rcu;
272 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
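/*
 * Illustration of the alignment requirement described above: if
 * WORK_STRUCT_FLAG_BITS were 4 (the actual value is configuration
 * dependent), every pool_workqueue would be aligned to 16 bytes, so the
 * low 4 bits of the pwq pointer stored in work->data are always zero and
 * can double as WORK_STRUCT_* flag bits.  work_struct_pwq() below simply
 * masks the flag bits off to recover the pointer.
 */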
273 
274 /*
275  * Structure used to wait for workqueue flush.
276  */
277 struct wq_flusher {
278 	struct list_head	list;		/* WQ: list of flushers */
279 	int			flush_color;	/* WQ: flush color waiting for */
280 	struct completion	done;		/* flush completion */
281 };
282 
283 struct wq_device;
284 
285 /*
286  * The externally visible workqueue.  It relays the issued work items to
287  * the appropriate worker_pool through its pool_workqueues.
288  */
289 struct workqueue_struct {
290 	struct list_head	pwqs;		/* WR: all pwqs of this wq */
291 	struct list_head	list;		/* PR: list of all workqueues */
292 
293 	struct mutex		mutex;		/* protects this wq */
294 	int			work_color;	/* WQ: current work color */
295 	int			flush_color;	/* WQ: current flush color */
296 	atomic_t		nr_pwqs_to_flush; /* flush in progress */
297 	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
298 	struct list_head	flusher_queue;	/* WQ: flush waiters */
299 	struct list_head	flusher_overflow; /* WQ: flush overflow list */
300 
301 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
302 	struct worker		*rescuer;	/* MD: rescue worker */
303 
304 	int			nr_drainers;	/* WQ: drain in progress */
305 	int			saved_max_active; /* WQ: saved pwq max_active */
306 
307 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
308 	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
309 
310 #ifdef CONFIG_SYSFS
311 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
312 #endif
313 #ifdef CONFIG_LOCKDEP
314 	char			*lock_name;
315 	struct lock_class_key	key;
316 	struct lockdep_map	lockdep_map;
317 #endif
318 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
319 
320 	/*
321 	 * Destruction of workqueue_struct is RCU protected to allow walking
322 	 * the workqueues list without grabbing wq_pool_mutex.
323 	 * This is used to dump all workqueues from sysrq.
324 	 */
325 	struct rcu_head		rcu;
326 
327 	/* hot fields used during command issue, aligned to cacheline */
328 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
329 	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
330 };
331 
332 static struct kmem_cache *pwq_cache;
333 
334 /*
335  * Each pod type describes how CPUs should be grouped for unbound workqueues.
336  * See the comment above workqueue_attrs->affn_scope.
337  */
338 struct wq_pod_type {
339 	int			nr_pods;	/* number of pods */
340 	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
341 	int			*pod_node;	/* pod -> node */
342 	int			*cpu_pod;	/* cpu -> pod */
343 };
344 
345 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
346 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
347 
348 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
349 	[WQ_AFFN_DFL]			= "default",
350 	[WQ_AFFN_CPU]			= "cpu",
351 	[WQ_AFFN_SMT]			= "smt",
352 	[WQ_AFFN_CACHE]			= "cache",
353 	[WQ_AFFN_NUMA]			= "numa",
354 	[WQ_AFFN_SYSTEM]		= "system",
355 };
356 
357 /*
358  * Per-cpu work items which run for longer than the following threshold are
359  * automatically considered CPU intensive and excluded from concurrency
360  * management to prevent them from noticeably delaying other per-cpu work items.
361  * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
362  * The actual value is initialized in wq_cpu_intensive_thresh_init().
363  */
364 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
365 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
366 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
367 static unsigned int wq_cpu_intensive_warning_thresh = 4;
368 module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
369 #endif
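/*
 * As module parameters of built-in workqueue code, the thresholds above
 * can usually be set on the kernel command line or adjusted at runtime,
 * e.g. (values in microseconds, illustrative only):
 *
 *	workqueue.cpu_intensive_thresh_us=10000
 *	echo 10000 > /sys/module/workqueue/parameters/cpu_intensive_thresh_us
 */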
370 
371 /* see the comment above the definition of WQ_POWER_EFFICIENT */
372 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
373 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
374 
375 static bool wq_online;			/* can kworkers be created yet? */
376 
377 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
378 static struct workqueue_attrs *wq_update_pod_attrs_buf;
379 
380 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
381 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
382 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
383 /* wait for manager to go away */
384 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
385 
386 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
387 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
388 
389 /* PL&A: allowable cpus for unbound wqs and work items */
390 static cpumask_var_t wq_unbound_cpumask;
391 
392 /* to further constrain wq_unbound_cpumask by cmdline parameter */
393 static struct cpumask wq_cmdline_cpumask __initdata;
394 
395 /* CPU where unbound work was last round robin scheduled from this CPU */
396 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
397 
398 /*
399  * Local execution of unbound work items is no longer guaranteed.  The
400  * following always forces round-robin CPU selection on unbound work items
401  * to uncover usages which depend on it.
402  */
403 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
404 static bool wq_debug_force_rr_cpu = true;
405 #else
406 static bool wq_debug_force_rr_cpu = false;
407 #endif
408 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
409 
410 /* the per-cpu worker pools */
411 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
412 
413 static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
414 
415 /* PL: hash of all unbound pools keyed by pool->attrs */
416 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
417 
418 /* I: attributes used when instantiating standard unbound pools on demand */
419 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
420 
421 /* I: attributes used when instantiating ordered pools on demand */
422 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
423 
424 /*
425  * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
426  * process context while holding a pool lock. Bounce to a dedicated kthread
427  * worker to avoid A-A deadlocks.
428  */
429 static struct kthread_worker *pwq_release_worker;
430 
431 struct workqueue_struct *system_wq __read_mostly;
432 EXPORT_SYMBOL(system_wq);
433 struct workqueue_struct *system_highpri_wq __read_mostly;
434 EXPORT_SYMBOL_GPL(system_highpri_wq);
435 struct workqueue_struct *system_long_wq __read_mostly;
436 EXPORT_SYMBOL_GPL(system_long_wq);
437 struct workqueue_struct *system_unbound_wq __read_mostly;
438 EXPORT_SYMBOL_GPL(system_unbound_wq);
439 struct workqueue_struct *system_freezable_wq __read_mostly;
440 EXPORT_SYMBOL_GPL(system_freezable_wq);
441 struct workqueue_struct *system_power_efficient_wq __read_mostly;
442 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
443 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
444 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
445 
446 static int worker_thread(void *__worker);
447 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
448 static void show_pwq(struct pool_workqueue *pwq);
449 static void show_one_worker_pool(struct worker_pool *pool);
450 
451 #define CREATE_TRACE_POINTS
452 #include <trace/events/workqueue.h>
453 
454 EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_start);
455 EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_end);
456 
457 #define assert_rcu_or_pool_mutex()					\
458 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
459 			 !lockdep_is_held(&wq_pool_mutex),		\
460 			 "RCU or wq_pool_mutex should be held")
461 
462 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
463 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
464 			 !lockdep_is_held(&wq->mutex) &&		\
465 			 !lockdep_is_held(&wq_pool_mutex),		\
466 			 "RCU, wq->mutex or wq_pool_mutex should be held")
467 
468 #define for_each_cpu_worker_pool(pool, cpu)				\
469 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
470 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
471 	     (pool)++)
472 
473 /**
474  * for_each_pool - iterate through all worker_pools in the system
475  * @pool: iteration cursor
476  * @pi: integer used for iteration
477  *
478  * This must be called either with wq_pool_mutex held or RCU read
479  * locked.  If the pool needs to be used beyond the locking in effect, the
480  * caller is responsible for guaranteeing that the pool stays online.
481  *
482  * The if/else clause exists only for the lockdep assertion and can be
483  * ignored.
484  */
485 #define for_each_pool(pool, pi)						\
486 	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
487 		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
488 		else
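/*
 * A minimal usage sketch of the iterator above (inspect_pool() is a
 * hypothetical callee used purely for illustration):
 *
 *	struct worker_pool *pool;
 *	int pi;
 *
 *	rcu_read_lock();
 *	for_each_pool(pool, pi)
 *		inspect_pool(pool);
 *	rcu_read_unlock();
 */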
489 
490 /**
491  * for_each_pool_worker - iterate through all workers of a worker_pool
492  * @worker: iteration cursor
493  * @pool: worker_pool to iterate workers of
494  *
495  * This must be called with wq_pool_attach_mutex.
496  *
497  * The if/else clause exists only for the lockdep assertion and can be
498  * ignored.
499  */
500 #define for_each_pool_worker(worker, pool)				\
501 	list_for_each_entry((worker), &(pool)->workers, node)		\
502 		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
503 		else
504 
505 /**
506  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
507  * @pwq: iteration cursor
508  * @wq: the target workqueue
509  *
510  * This must be called either with wq->mutex held or RCU read locked.
511  * If the pwq needs to be used beyond the locking in effect, the caller is
512  * responsible for guaranteeing that the pwq stays online.
513  *
514  * The if/else clause exists only for the lockdep assertion and can be
515  * ignored.
516  */
517 #define for_each_pwq(pwq, wq)						\
518 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
519 				 lockdep_is_held(&(wq->mutex)))
520 
521 #ifdef CONFIG_DEBUG_OBJECTS_WORK
522 
523 static const struct debug_obj_descr work_debug_descr;
524 
525 static void *work_debug_hint(void *addr)
526 {
527 	return ((struct work_struct *) addr)->func;
528 }
529 
530 static bool work_is_static_object(void *addr)
531 {
532 	struct work_struct *work = addr;
533 
534 	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
535 }
536 
537 /*
538  * fixup_init is called when:
539  * - an active object is initialized
540  */
541 static bool work_fixup_init(void *addr, enum debug_obj_state state)
542 {
543 	struct work_struct *work = addr;
544 
545 	switch (state) {
546 	case ODEBUG_STATE_ACTIVE:
547 		cancel_work_sync(work);
548 		debug_object_init(work, &work_debug_descr);
549 		return true;
550 	default:
551 		return false;
552 	}
553 }
554 
555 /*
556  * fixup_free is called when:
557  * - an active object is freed
558  */
559 static bool work_fixup_free(void *addr, enum debug_obj_state state)
560 {
561 	struct work_struct *work = addr;
562 
563 	switch (state) {
564 	case ODEBUG_STATE_ACTIVE:
565 		cancel_work_sync(work);
566 		debug_object_free(work, &work_debug_descr);
567 		return true;
568 	default:
569 		return false;
570 	}
571 }
572 
573 static const struct debug_obj_descr work_debug_descr = {
574 	.name		= "work_struct",
575 	.debug_hint	= work_debug_hint,
576 	.is_static_object = work_is_static_object,
577 	.fixup_init	= work_fixup_init,
578 	.fixup_free	= work_fixup_free,
579 };
580 
581 static inline void debug_work_activate(struct work_struct *work)
582 {
583 	debug_object_activate(work, &work_debug_descr);
584 }
585 
586 static inline void debug_work_deactivate(struct work_struct *work)
587 {
588 	debug_object_deactivate(work, &work_debug_descr);
589 }
590 
591 void __init_work(struct work_struct *work, int onstack)
592 {
593 	if (onstack)
594 		debug_object_init_on_stack(work, &work_debug_descr);
595 	else
596 		debug_object_init(work, &work_debug_descr);
597 }
598 EXPORT_SYMBOL_GPL(__init_work);
599 
600 void destroy_work_on_stack(struct work_struct *work)
601 {
602 	debug_object_free(work, &work_debug_descr);
603 }
604 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
605 
606 void destroy_delayed_work_on_stack(struct delayed_work *work)
607 {
608 	destroy_timer_on_stack(&work->timer);
609 	debug_object_free(&work->work, &work_debug_descr);
610 }
611 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
612 
613 #else
614 static inline void debug_work_activate(struct work_struct *work) { }
615 static inline void debug_work_deactivate(struct work_struct *work) { }
616 #endif
617 
618 /**
619  * worker_pool_assign_id - allocate ID and assign it to @pool
620  * @pool: the pool pointer of interest
621  *
622  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
623  * successfully, -errno on failure.
624  */
625 static int worker_pool_assign_id(struct worker_pool *pool)
626 {
627 	int ret;
628 
629 	lockdep_assert_held(&wq_pool_mutex);
630 
631 	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
632 			GFP_KERNEL);
633 	if (ret >= 0) {
634 		pool->id = ret;
635 		return 0;
636 	}
637 	return ret;
638 }
639 
640 static unsigned int work_color_to_flags(int color)
641 {
642 	return color << WORK_STRUCT_COLOR_SHIFT;
643 }
644 
645 static int get_work_color(unsigned long work_data)
646 {
647 	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
648 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
649 }
650 
651 static int work_next_color(int color)
652 {
653 	return (color + 1) % WORK_NR_COLORS;
654 }
655 
656 /*
657  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
658  * contain the pointer to the queued pwq.  Once execution starts, the flag
659  * is cleared and the high bits contain OFFQ flags and pool ID.
660  *
661  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
662  * and clear_work_data() can be used to set the pwq, pool or clear
663  * work->data.  These functions should only be called while the work is
664  * owned - ie. while the PENDING bit is set.
665  *
666  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
667  * corresponding to a work.  Pool is available once the work has been
668  * queued anywhere after initialization until it is sync canceled.  pwq is
669  * available only while the work item is queued.
670  *
671  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
672  * canceled.  While being canceled, a work item may have its PENDING set
673  * but stay off timer and worklist for arbitrarily long and nobody should
674  * try to steal the PENDING bit.
675  */
676 static inline void set_work_data(struct work_struct *work, unsigned long data,
677 				 unsigned long flags)
678 {
679 	WARN_ON_ONCE(!work_pending(work));
680 	atomic_long_set(&work->data, data | flags | work_static(work));
681 }
682 
683 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
684 			 unsigned long extra_flags)
685 {
686 	set_work_data(work, (unsigned long)pwq,
687 		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
688 }
689 
690 static void set_work_pool_and_keep_pending(struct work_struct *work,
691 					   int pool_id)
692 {
693 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
694 		      WORK_STRUCT_PENDING);
695 }
696 
697 static void set_work_pool_and_clear_pending(struct work_struct *work,
698 					    int pool_id)
699 {
700 	/*
701 	 * The following wmb is paired with the implied mb in
702 	 * test_and_set_bit(PENDING) and ensures all updates to @work made
703 	 * here are visible to and precede any updates by the next PENDING
704 	 * owner.
705 	 */
706 	smp_wmb();
707 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
708 	/*
709 	 * The following mb guarantees that previous clear of a PENDING bit
710 	 * will not be reordered with any speculative LOADS or STORES from
711 	 * work->current_func, which is executed afterwards.  This possible
712 	 * reordering can lead to a missed execution on attempt to queue
713 	 * the same @work.  E.g. consider this case:
714 	 *
715 	 *   CPU#0                         CPU#1
716 	 *   ----------------------------  --------------------------------
717 	 *
718 	 * 1  STORE event_indicated
719 	 * 2  queue_work_on() {
720 	 * 3    test_and_set_bit(PENDING)
721 	 * 4 }                             set_..._and_clear_pending() {
722 	 * 5                                 set_work_data() # clear bit
723 	 * 6                                 smp_mb()
724 	 * 7                               work->current_func() {
725 	 * 8				      LOAD event_indicated
726 	 *				   }
727 	 *
728 	 * Without an explicit full barrier speculative LOAD on line 8 can
729 	 * be executed before CPU#0 does STORE on line 1.  If that happens,
730 	 * CPU#0 observes the PENDING bit is still set and new execution of
731 	 * a @work is not queued in a hope, that CPU#1 will eventually
732 	 * finish the queued @work.  Meanwhile CPU#1 does not see
733 	 * event_indicated is set, because speculative LOAD was executed
734 	 * before actual STORE.
735 	 */
736 	smp_mb();
737 }
738 
739 static void clear_work_data(struct work_struct *work)
740 {
741 	smp_wmb();	/* see set_work_pool_and_clear_pending() */
742 	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
743 }
744 
745 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
746 {
747 	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
748 }
749 
750 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
751 {
752 	unsigned long data = atomic_long_read(&work->data);
753 
754 	if (data & WORK_STRUCT_PWQ)
755 		return work_struct_pwq(data);
756 	else
757 		return NULL;
758 }
759 
760 /**
761  * get_work_pool - return the worker_pool a given work was associated with
762  * @work: the work item of interest
763  *
764  * Pools are created and destroyed under wq_pool_mutex, and allow read
765  * access under RCU read lock.  As such, this function should be
766  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
767  *
768  * All fields of the returned pool are accessible as long as the above
769  * mentioned locking is in effect.  If the returned pool needs to be used
770  * beyond the critical section, the caller is responsible for ensuring the
771  * returned pool is and stays online.
772  *
773  * Return: The worker_pool @work was last associated with.  %NULL if none.
774  */
775 static struct worker_pool *get_work_pool(struct work_struct *work)
776 {
777 	unsigned long data = atomic_long_read(&work->data);
778 	int pool_id;
779 
780 	assert_rcu_or_pool_mutex();
781 
782 	if (data & WORK_STRUCT_PWQ)
783 		return work_struct_pwq(data)->pool;
784 
785 	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
786 	if (pool_id == WORK_OFFQ_POOL_NONE)
787 		return NULL;
788 
789 	return idr_find(&worker_pool_idr, pool_id);
790 }
791 
792 /**
793  * get_work_pool_id - return the worker pool ID a given work is associated with
794  * @work: the work item of interest
795  *
796  * Return: The worker_pool ID @work was last associated with.
797  * %WORK_OFFQ_POOL_NONE if none.
798  */
799 static int get_work_pool_id(struct work_struct *work)
800 {
801 	unsigned long data = atomic_long_read(&work->data);
802 
803 	if (data & WORK_STRUCT_PWQ)
804 		return work_struct_pwq(data)->pool->id;
805 
806 	return data >> WORK_OFFQ_POOL_SHIFT;
807 }
808 
809 static void mark_work_canceling(struct work_struct *work)
810 {
811 	unsigned long pool_id = get_work_pool_id(work);
812 
813 	pool_id <<= WORK_OFFQ_POOL_SHIFT;
814 	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
815 }
816 
817 static bool work_is_canceling(struct work_struct *work)
818 {
819 	unsigned long data = atomic_long_read(&work->data);
820 
821 	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
822 }
823 
824 /*
825  * Policy functions.  These define the policies on how the global worker
826  * pools are managed.  Unless noted otherwise, these functions assume that
827  * they're being called with pool->lock held.
828  */
829 
830 /*
831  * Need to wake up a worker?  Called from anything but currently
832  * running workers.
833  *
834  * Note that, because unbound workers never contribute to nr_running, this
835  * function will always return %true for unbound pools as long as the
836  * worklist isn't empty.
837  */
838 static bool need_more_worker(struct worker_pool *pool)
839 {
840 	return !list_empty(&pool->worklist) && !pool->nr_running;
841 }
842 
843 /* Can I start working?  Called from busy but !running workers. */
844 static bool may_start_working(struct worker_pool *pool)
845 {
846 	return pool->nr_idle;
847 }
848 
849 /* Do I need to keep working?  Called from currently running workers. */
850 static bool keep_working(struct worker_pool *pool)
851 {
852 	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
853 }
854 
855 /* Do we need a new worker?  Called from manager. */
856 static bool need_to_create_worker(struct worker_pool *pool)
857 {
858 	return need_more_worker(pool) && !may_start_working(pool);
859 }
860 
861 /* Do we have too many workers and should some go away? */
862 static bool too_many_workers(struct worker_pool *pool)
863 {
864 	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
865 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
866 	int nr_busy = pool->nr_workers - nr_idle;
867 
868 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
869 }
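/*
 * A worked example of the check above: with MAX_IDLE_WORKERS_RATIO = 4 and
 * 16 busy workers, up to 5 idle workers are tolerated; a 6th idle worker
 * makes (6 - 2) * 4 >= 16 true, so too_many_workers() reports a surplus and
 * the idle timer may start culling the excess idle workers.
 */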
870 
871 /**
872  * worker_set_flags - set worker flags and adjust nr_running accordingly
873  * @worker: self
874  * @flags: flags to set
875  *
876  * Set @flags in @worker->flags and adjust nr_running accordingly.
877  */
878 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
879 {
880 	struct worker_pool *pool = worker->pool;
881 
882 	lockdep_assert_held(&pool->lock);
883 
884 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
885 	if ((flags & WORKER_NOT_RUNNING) &&
886 	    !(worker->flags & WORKER_NOT_RUNNING)) {
887 		pool->nr_running--;
888 	}
889 
890 	worker->flags |= flags;
891 }
892 
893 /**
894  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
895  * @worker: self
896  * @flags: flags to clear
897  *
898  * Clear @flags in @worker->flags and adjust nr_running accordingly.
899  */
900 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
901 {
902 	struct worker_pool *pool = worker->pool;
903 	unsigned int oflags = worker->flags;
904 
905 	lockdep_assert_held(&pool->lock);
906 
907 	worker->flags &= ~flags;
908 
909 	/*
910 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
911 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
912 	 * of multiple flags, not a single flag.
913 	 */
914 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
915 		if (!(worker->flags & WORKER_NOT_RUNNING))
916 			pool->nr_running++;
917 }
918 
919 /* Return the first idle worker.  Called with pool->lock held. */
920 static struct worker *first_idle_worker(struct worker_pool *pool)
921 {
922 	if (unlikely(list_empty(&pool->idle_list)))
923 		return NULL;
924 
925 	return list_first_entry(&pool->idle_list, struct worker, entry);
926 }
927 
928 /**
929  * worker_enter_idle - enter idle state
930  * @worker: worker which is entering idle state
931  *
932  * @worker is entering idle state.  Update stats and idle timer if
933  * necessary.
934  *
935  * LOCKING:
936  * raw_spin_lock_irq(pool->lock).
937  */
938 static void worker_enter_idle(struct worker *worker)
939 {
940 	struct worker_pool *pool = worker->pool;
941 
942 	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
943 	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
944 			 (worker->hentry.next || worker->hentry.pprev)))
945 		return;
946 
947 	/* can't use worker_set_flags(), also called from create_worker() */
948 	worker->flags |= WORKER_IDLE;
949 	pool->nr_idle++;
950 	worker->last_active = jiffies;
951 
952 	/* idle_list is LIFO */
953 	list_add(&worker->entry, &pool->idle_list);
954 
955 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
956 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
957 
958 	/* Sanity check nr_running. */
959 	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
960 }
961 
962 /**
963  * worker_leave_idle - leave idle state
964  * @worker: worker which is leaving idle state
965  *
966  * @worker is leaving idle state.  Update stats.
967  *
968  * LOCKING:
969  * raw_spin_lock_irq(pool->lock).
970  */
971 static void worker_leave_idle(struct worker *worker)
972 {
973 	struct worker_pool *pool = worker->pool;
974 
975 	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
976 		return;
977 	worker_clr_flags(worker, WORKER_IDLE);
978 	pool->nr_idle--;
979 	list_del_init(&worker->entry);
980 }
981 
982 /**
983  * find_worker_executing_work - find worker which is executing a work
984  * @pool: pool of interest
985  * @work: work to find worker for
986  *
987  * Find a worker which is executing @work on @pool by searching
988  * @pool->busy_hash which is keyed by the address of @work.  For a worker
989  * to match, its current execution should match the address of @work and
990  * its work function.  This is to avoid unwanted dependency between
991  * unrelated work executions through a work item being recycled while still
992  * being executed.
993  *
994  * This is a bit tricky.  A work item may be freed once its execution
995  * starts and nothing prevents the freed area from being recycled for
996  * another work item.  If the same work item address ends up being reused
997  * before the original execution finishes, workqueue will identify the
998  * recycled work item as currently executing and make it wait until the
999  * current execution finishes, introducing an unwanted dependency.
1000  *
1001  * This function checks the work item address and work function to avoid
1002  * false positives.  Note that this isn't complete as one may construct a
1003  * work function which can introduce dependency onto itself through a
1004  * recycled work item.  Well, if somebody wants to shoot oneself in the
1005  * foot that badly, there's only so much we can do, and if such deadlock
1006  * actually occurs, it should be easy to locate the culprit work function.
1007  *
1008  * CONTEXT:
1009  * raw_spin_lock_irq(pool->lock).
1010  *
1011  * Return:
1012  * Pointer to worker which is executing @work if found, %NULL
1013  * otherwise.
1014  */
1015 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1016 						 struct work_struct *work)
1017 {
1018 	struct worker *worker;
1019 
1020 	hash_for_each_possible(pool->busy_hash, worker, hentry,
1021 			       (unsigned long)work)
1022 		if (worker->current_work == work &&
1023 		    worker->current_func == work->func)
1024 			return worker;
1025 
1026 	return NULL;
1027 }
1028 
1029 /**
1030  * move_linked_works - move linked works to a list
1031  * @work: start of series of works to be scheduled
1032  * @head: target list to append @work to
1033  * @nextp: out parameter for nested worklist walking
1034  *
1035  * Schedule linked works starting from @work to @head. Work series to be
1036  * scheduled starts at @work and includes any consecutive work with
1037  * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
1038  * @nextp.
1039  *
1040  * CONTEXT:
1041  * raw_spin_lock_irq(pool->lock).
1042  */
1043 static void move_linked_works(struct work_struct *work, struct list_head *head,
1044 			      struct work_struct **nextp)
1045 {
1046 	struct work_struct *n;
1047 
1048 	/*
1049 	 * Linked worklist will always end before the end of the list,
1050 	 * use NULL for list head.
1051 	 */
1052 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1053 		list_move_tail(&work->entry, head);
1054 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1055 			break;
1056 	}
1057 
1058 	/*
1059 	 * If we're already inside safe list traversal and have moved
1060 	 * multiple works to the scheduled queue, the next position
1061 	 * needs to be updated.
1062 	 */
1063 	if (nextp)
1064 		*nextp = n;
1065 }
1066 
1067 /**
1068  * assign_work - assign a work item and its linked work items to a worker
1069  * @work: work to assign
1070  * @worker: worker to assign to
1071  * @nextp: out parameter for nested worklist walking
1072  *
1073  * Assign @work and its linked work items to @worker. If @work is already being
1074  * executed by another worker in the same pool, it'll be punted there.
1075  *
1076  * If @nextp is not NULL, it's updated to point to the next work of the last
1077  * scheduled work. This allows assign_work() to be nested inside
1078  * list_for_each_entry_safe().
1079  *
1080  * Returns %true if @work was successfully assigned to @worker. %false if @work
1081  * was punted to another worker already executing it.
1082  */
1083 static bool assign_work(struct work_struct *work, struct worker *worker,
1084 			struct work_struct **nextp)
1085 {
1086 	struct worker_pool *pool = worker->pool;
1087 	struct worker *collision;
1088 
1089 	lockdep_assert_held(&pool->lock);
1090 
1091 	/*
1092 	 * A single work shouldn't be executed concurrently by multiple workers.
1093 	 * __queue_work() ensures that @work doesn't jump to a different pool
1094 	 * while still running in the previous pool. Here, we should ensure that
1095 	 * @work is not executed concurrently by multiple workers from the same
1096 	 * pool. Check whether anyone is already processing the work. If so,
1097 	 * defer the work to the currently executing one.
1098 	 */
1099 	collision = find_worker_executing_work(pool, work);
1100 	if (unlikely(collision)) {
1101 		move_linked_works(work, &collision->scheduled, nextp);
1102 		return false;
1103 	}
1104 
1105 	move_linked_works(work, &worker->scheduled, nextp);
1106 	return true;
1107 }
1108 
1109 /**
1110  * kick_pool - wake up an idle worker if necessary
1111  * @pool: pool to kick
1112  *
1113  * @pool may have pending work items. Wake up worker if necessary. Returns
1114  * whether a worker was woken up.
1115  */
1116 static bool kick_pool(struct worker_pool *pool)
1117 {
1118 	struct worker *worker = first_idle_worker(pool);
1119 	struct task_struct *p;
1120 
1121 	lockdep_assert_held(&pool->lock);
1122 
1123 	if (!need_more_worker(pool) || !worker)
1124 		return false;
1125 
1126 	p = worker->task;
1127 
1128 #ifdef CONFIG_SMP
1129 	/*
1130 	 * Idle @worker is about to execute @work and waking up provides an
1131 	 * opportunity to migrate @worker at a lower cost by setting the task's
1132 	 * wake_cpu field. Let's see if we want to move @worker to improve
1133 	 * execution locality.
1134 	 *
1135 	 * We're waking the worker that went idle the latest and there's some
1136 	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
1137 	 * so, setting the wake_cpu won't do anything. As this is a best-effort
1138 	 * optimization and the race window is narrow, let's leave as-is for
1139 	 * now. If this becomes pronounced, we can skip over workers which are
1140 	 * still on cpu when picking an idle worker.
1141 	 *
1142 	 * If @pool has non-strict affinity, @worker might have ended up outside
1143 	 * its affinity scope. Repatriate.
1144 	 */
1145 	if (!pool->attrs->affn_strict &&
1146 	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
1147 		struct work_struct *work = list_first_entry(&pool->worklist,
1148 						struct work_struct, entry);
1149 		int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
1150 							  cpu_online_mask);
1151 		if (wake_cpu < nr_cpu_ids) {
1152 			p->wake_cpu = wake_cpu;
1153 			get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
1154 		}
1155 	}
1156 #endif
1157 	wake_up_process(p);
1158 	return true;
1159 }
1160 
1161 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
1162 
1163 /*
1164  * Concurrency-managed per-cpu work items that hog CPU for longer than
1165  * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
1166  * which prevents them from stalling other concurrency-managed work items. If a
1167  * work function keeps triggering this mechanism, it's likely that the work item
1168  * should be using an unbound workqueue instead.
1169  *
1170  * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1171  * and reports them so that they can be examined and converted to use unbound
1172  * workqueues as appropriate. To avoid flooding the console, each violating work
1173  * function is tracked and reported with exponential backoff.
1174  */
1175 #define WCI_MAX_ENTS 128
1176 
1177 struct wci_ent {
1178 	work_func_t		func;
1179 	atomic64_t		cnt;
1180 	struct hlist_node	hash_node;
1181 };
1182 
1183 static struct wci_ent wci_ents[WCI_MAX_ENTS];
1184 static int wci_nr_ents;
1185 static DEFINE_RAW_SPINLOCK(wci_lock);
1186 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
1187 
1188 static struct wci_ent *wci_find_ent(work_func_t func)
1189 {
1190 	struct wci_ent *ent;
1191 
1192 	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
1193 				   (unsigned long)func) {
1194 		if (ent->func == func)
1195 			return ent;
1196 	}
1197 	return NULL;
1198 }
1199 
1200 static void wq_cpu_intensive_report(work_func_t func)
1201 {
1202 	struct wci_ent *ent;
1203 
1204 restart:
1205 	ent = wci_find_ent(func);
1206 	if (ent) {
1207 		u64 cnt;
1208 
1209 		/*
1210 		 * Start reporting from the warning_thresh and back off
1211 		 * exponentially.
1212 		 */
1213 		cnt = atomic64_inc_return_relaxed(&ent->cnt);
1214 		if (wq_cpu_intensive_warning_thresh &&
1215 		    cnt >= wq_cpu_intensive_warning_thresh &&
1216 		    is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
1217 			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
1218 					ent->func, wq_cpu_intensive_thresh_us,
1219 					atomic64_read(&ent->cnt));
1220 		return;
1221 	}
1222 
1223 	/*
1224 	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
1225 	 * is exhausted, something went really wrong and we probably made enough
1226 	 * noise already.
1227 	 */
1228 	if (wci_nr_ents >= WCI_MAX_ENTS)
1229 		return;
1230 
1231 	raw_spin_lock(&wci_lock);
1232 
1233 	if (wci_nr_ents >= WCI_MAX_ENTS) {
1234 		raw_spin_unlock(&wci_lock);
1235 		return;
1236 	}
1237 
1238 	if (wci_find_ent(func)) {
1239 		raw_spin_unlock(&wci_lock);
1240 		goto restart;
1241 	}
1242 
1243 	ent = &wci_ents[wci_nr_ents++];
1244 	ent->func = func;
1245 	atomic64_set(&ent->cnt, 0);
1246 	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
1247 
1248 	raw_spin_unlock(&wci_lock);
1249 
1250 	goto restart;
1251 }
1252 
1253 #else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1254 static void wq_cpu_intensive_report(work_func_t func) {}
1255 #endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1256 
1257 /**
1258  * wq_worker_running - a worker is running again
1259  * @task: task waking up
1260  *
1261  * This function is called when a worker returns from schedule()
1262  */
1263 void wq_worker_running(struct task_struct *task)
1264 {
1265 	struct worker *worker = kthread_data(task);
1266 
1267 	if (!READ_ONCE(worker->sleeping))
1268 		return;
1269 
1270 	/*
1271 	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1272 	 * and the nr_running increment below, we may ruin the nr_running reset
1273 	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1274 	 * pool. Protect against such race.
1275 	 */
1276 	preempt_disable();
1277 	if (!(worker->flags & WORKER_NOT_RUNNING))
1278 		worker->pool->nr_running++;
1279 	preempt_enable();
1280 
1281 	/*
1282 	 * CPU intensive auto-detection cares about how long a work item hogged
1283 	 * CPU without sleeping. Reset the starting timestamp on wakeup.
1284 	 */
1285 	worker->current_at = worker->task->se.sum_exec_runtime;
1286 
1287 	WRITE_ONCE(worker->sleeping, 0);
1288 }
1289 
1290 /**
1291  * wq_worker_sleeping - a worker is going to sleep
1292  * @task: task going to sleep
1293  *
1294  * This function is called from schedule() when a busy worker is
1295  * going to sleep.
1296  */
1297 void wq_worker_sleeping(struct task_struct *task)
1298 {
1299 	struct worker *worker = kthread_data(task);
1300 	struct worker_pool *pool;
1301 
1302 	/*
1303 	 * Rescuers, which may not have all the fields set up like normal
1304 	 * workers, also reach here, let's not access anything before
1305 	 * checking NOT_RUNNING.
1306 	 */
1307 	if (worker->flags & WORKER_NOT_RUNNING)
1308 		return;
1309 
1310 	pool = worker->pool;
1311 
1312 	/* Return if preempted before wq_worker_running() was reached */
1313 	if (READ_ONCE(worker->sleeping))
1314 		return;
1315 
1316 	WRITE_ONCE(worker->sleeping, 1);
1317 	raw_spin_lock_irq(&pool->lock);
1318 
1319 	/*
1320 	 * Recheck in case unbind_workers() preempted us. We don't
1321 	 * want to decrement nr_running after the worker is unbound
1322 	 * and nr_running has been reset.
1323 	 */
1324 	if (worker->flags & WORKER_NOT_RUNNING) {
1325 		raw_spin_unlock_irq(&pool->lock);
1326 		return;
1327 	}
1328 
1329 	pool->nr_running--;
1330 	if (kick_pool(pool))
1331 		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1332 
1333 	raw_spin_unlock_irq(&pool->lock);
1334 }
1335 
1336 /**
1337  * wq_worker_tick - a scheduler tick occurred while a kworker is running
1338  * @task: task currently running
1339  *
1340  * Called from scheduler_tick(). We're in the IRQ context and the current
1341  * worker's fields which follow the 'K' locking rule can be accessed safely.
1342  */
1343 void wq_worker_tick(struct task_struct *task)
1344 {
1345 	struct worker *worker = kthread_data(task);
1346 	struct pool_workqueue *pwq = worker->current_pwq;
1347 	struct worker_pool *pool = worker->pool;
1348 
1349 	if (!pwq)
1350 		return;
1351 
1352 	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
1353 
1354 	if (!wq_cpu_intensive_thresh_us)
1355 		return;
1356 
1357 	/*
1358 	 * If the current worker is concurrency managed and hogged the CPU for
1359 	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
1360 	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
1361 	 *
1362 	 * If @worker->sleeping is set, @worker is in the process of
1363 	 * switching out voluntarily and won't be contributing to
1364 	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1365 	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
1366 	 * double decrements. The task is releasing the CPU anyway. Let's skip.
1367 	 * We probably want to make this prettier in the future.
1368 	 */
1369 	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
1370 	    worker->task->se.sum_exec_runtime - worker->current_at <
1371 	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
1372 		return;
1373 
1374 	raw_spin_lock(&pool->lock);
1375 
1376 	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
1377 	wq_cpu_intensive_report(worker->current_func);
1378 	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
1379 
1380 	if (kick_pool(pool))
1381 		pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1382 
1383 	raw_spin_unlock(&pool->lock);
1384 }
1385 
1386 /**
1387  * wq_worker_last_func - retrieve worker's last work function
1388  * @task: Task to retrieve last work function of.
1389  *
1390  * Determine the last function a worker executed. This is called from
1391  * the scheduler to get a worker's last known identity.
1392  *
1393  * CONTEXT:
1394  * raw_spin_lock_irq(rq->lock)
1395  *
1396  * This function is called during schedule() when a kworker is going
1397  * to sleep. It's used by psi to identify aggregation workers during
1398  * dequeuing, to allow periodic aggregation to shut-off when that
1399  * worker is the last task in the system or cgroup to go to sleep.
1400  *
1401  * As this function doesn't involve any workqueue-related locking, it
1402  * only returns stable values when called from inside the scheduler's
1403  * queuing and dequeuing paths, when @task, which must be a kworker,
1404  * is guaranteed to not be processing any works.
1405  *
1406  * Return:
1407  * The last work function %current executed as a worker, NULL if it
1408  * hasn't executed any work yet.
1409  */
1410 work_func_t wq_worker_last_func(struct task_struct *task)
1411 {
1412 	struct worker *worker = kthread_data(task);
1413 
1414 	return worker->last_func;
1415 }
1416 
1417 /**
1418  * get_pwq - get an extra reference on the specified pool_workqueue
1419  * @pwq: pool_workqueue to get
1420  *
1421  * Obtain an extra reference on @pwq.  The caller should guarantee that
1422  * @pwq has positive refcnt and be holding the matching pool->lock.
1423  */
1424 static void get_pwq(struct pool_workqueue *pwq)
1425 {
1426 	lockdep_assert_held(&pwq->pool->lock);
1427 	WARN_ON_ONCE(pwq->refcnt <= 0);
1428 	pwq->refcnt++;
1429 }
1430 
1431 /**
1432  * put_pwq - put a pool_workqueue reference
1433  * @pwq: pool_workqueue to put
1434  *
1435  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1436  * destruction.  The caller should be holding the matching pool->lock.
1437  */
1438 static void put_pwq(struct pool_workqueue *pwq)
1439 {
1440 	lockdep_assert_held(&pwq->pool->lock);
1441 	if (likely(--pwq->refcnt))
1442 		return;
1443 	/*
1444 	 * @pwq can't be released under pool->lock, bounce to a dedicated
1445 	 * kthread_worker to avoid A-A deadlocks.
1446 	 */
1447 	kthread_queue_work(pwq_release_worker, &pwq->release_work);
1448 }
1449 
1450 /**
1451  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1452  * @pwq: pool_workqueue to put (can be %NULL)
1453  *
1454  * put_pwq() with locking.  This function also allows %NULL @pwq.
1455  */
1456 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1457 {
1458 	if (pwq) {
1459 		/*
1460 		 * As both pwqs and pools are RCU protected, the
1461 		 * following lock operations are safe.
1462 		 */
1463 		raw_spin_lock_irq(&pwq->pool->lock);
1464 		put_pwq(pwq);
1465 		raw_spin_unlock_irq(&pwq->pool->lock);
1466 	}
1467 }
1468 
1469 static void pwq_activate_inactive_work(struct work_struct *work)
1470 {
1471 	struct pool_workqueue *pwq = get_work_pwq(work);
1472 
1473 	trace_workqueue_activate_work(work);
1474 	if (list_empty(&pwq->pool->worklist))
1475 		pwq->pool->watchdog_ts = jiffies;
1476 	move_linked_works(work, &pwq->pool->worklist, NULL);
1477 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1478 	pwq->nr_active++;
1479 }
1480 
1481 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1482 {
1483 	struct work_struct *work = list_first_entry(&pwq->inactive_works,
1484 						    struct work_struct, entry);
1485 
1486 	pwq_activate_inactive_work(work);
1487 }
1488 
1489 /**
1490  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1491  * @pwq: pwq of interest
1492  * @work_data: work_data of work which left the queue
1493  *
1494  * A work either has completed or is removed from pending queue,
1495  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1496  *
1497  * CONTEXT:
1498  * raw_spin_lock_irq(pool->lock).
1499  */
1500 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1501 {
1502 	int color = get_work_color(work_data);
1503 
1504 	if (!(work_data & WORK_STRUCT_INACTIVE)) {
1505 		pwq->nr_active--;
1506 		if (!list_empty(&pwq->inactive_works)) {
1507 			/* one down, submit an inactive one */
1508 			if (pwq->nr_active < pwq->max_active)
1509 				pwq_activate_first_inactive(pwq);
1510 		}
1511 	}
1512 
1513 	pwq->nr_in_flight[color]--;
1514 
1515 	/* is flush in progress and are we at the flushing tip? */
1516 	if (likely(pwq->flush_color != color))
1517 		goto out_put;
1518 
1519 	/* are there still in-flight works? */
1520 	if (pwq->nr_in_flight[color])
1521 		goto out_put;
1522 
1523 	/* this pwq is done, clear flush_color */
1524 	pwq->flush_color = -1;
1525 
1526 	/*
1527 	 * If this was the last pwq, wake up the first flusher.  It
1528 	 * will handle the rest.
1529 	 */
1530 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1531 		complete(&pwq->wq->first_flusher->done);
1532 out_put:
1533 	put_pwq(pwq);
1534 }
1535 
1536 /**
1537  * try_to_grab_pending - steal work item from worklist and disable irq
1538  * @work: work item to steal
1539  * @is_dwork: @work is a delayed_work
1540  * @flags: place to store irq state
1541  *
1542  * Try to grab PENDING bit of @work.  This function can handle @work in any
1543  * stable state - idle, on timer or on worklist.
1544  *
1545  * Return:
1546  *
1547  *  ========	================================================================
1548  *  1		if @work was pending and we successfully stole PENDING
1549  *  0		if @work was idle and we claimed PENDING
1550  *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1551  *  -ENOENT	if someone else is canceling @work, this state may persist
1552  *		for arbitrarily long
1553  *  ========	================================================================
1554  *
1555  * Note:
1556  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1557  * interrupted while holding PENDING and @work off queue, irq must be
1558  * disabled on entry.  This, combined with delayed_work->timer being
1559  * irqsafe, ensures that we return -EAGAIN only for a finite, short period of time.
1560  *
1561  * On successful return, >= 0, irq is disabled and the caller is
1562  * responsible for releasing it using local_irq_restore(*@flags).
1563  *
1564  * This function is safe to call from any context including IRQ handler.
1565  */
1566 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1567 			       unsigned long *flags)
1568 {
1569 	struct worker_pool *pool;
1570 	struct pool_workqueue *pwq;
1571 
1572 	local_irq_save(*flags);
1573 
1574 	/* try to steal the timer if it exists */
1575 	if (is_dwork) {
1576 		struct delayed_work *dwork = to_delayed_work(work);
1577 
1578 		/*
1579 		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1580 		 * guaranteed that the timer is not queued anywhere and not
1581 		 * running on the local CPU.
1582 		 */
1583 		if (likely(del_timer(&dwork->timer)))
1584 			return 1;
1585 	}
1586 
1587 	/* try to claim PENDING the normal way */
1588 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1589 		return 0;
1590 
1591 	rcu_read_lock();
1592 	/*
1593 	 * The queueing is in progress, or it is already queued. Try to
1594 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1595 	 */
1596 	pool = get_work_pool(work);
1597 	if (!pool)
1598 		goto fail;
1599 
1600 	raw_spin_lock(&pool->lock);
1601 	/*
1602 	 * work->data is guaranteed to point to pwq only while the work
1603 	 * item is queued on pwq->wq, and both updating work->data to point
1604 	 * to pwq on queueing and to pool on dequeueing are done under
1605 	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1606 	 * points to pwq which is associated with a locked pool, the work
1607 	 * item is currently queued on that pool.
1608 	 */
1609 	pwq = get_work_pwq(work);
1610 	if (pwq && pwq->pool == pool) {
1611 		debug_work_deactivate(work);
1612 
1613 		/*
1614 		 * A cancelable inactive work item must be in the
1615 		 * pwq->inactive_works since a queued barrier can't be
1616 		 * canceled (see the comments in insert_wq_barrier()).
1617 		 *
1618 		 * An inactive work item cannot be grabbed directly because
1619 		 * it might have linked barrier work items which, if left
1620 		 * on the inactive_works list, will confuse pwq->nr_active
1621 		 * management later on and cause stall.  Make sure the work
1622 		 * item is activated before grabbing.
1623 		 */
1624 		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1625 			pwq_activate_inactive_work(work);
1626 
1627 		list_del_init(&work->entry);
1628 		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
1629 
1630 		/* work->data points to pwq iff queued, point to pool */
1631 		set_work_pool_and_keep_pending(work, pool->id);
1632 
1633 		raw_spin_unlock(&pool->lock);
1634 		rcu_read_unlock();
1635 		return 1;
1636 	}
1637 	raw_spin_unlock(&pool->lock);
1638 fail:
1639 	rcu_read_unlock();
1640 	local_irq_restore(*flags);
1641 	if (work_is_canceling(work))
1642 		return -ENOENT;
1643 	cpu_relax();
1644 	return -EAGAIN;
1645 }
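/*
 * Illustrative caller sketch (not part of workqueue.c): how the
 * try_to_grab_pending() contract above is consumed.  Busy-retry on
 * -EAGAIN; on a >= 0 return the caller owns PENDING and must release the
 * saved irq state.  example_steal_pending() is a hypothetical name; the
 * real in-tree users are helpers such as mod_delayed_work_on() further
 * down in this file.
 */
static bool example_steal_pending(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	do {
		/* irq state is saved into @flags on >= 0 return */
		ret = try_to_grab_pending(work, false, &flags);
	} while (ret == -EAGAIN);

	if (ret < 0)
		return false;	/* -ENOENT: someone else is canceling @work */

	/* we own PENDING here; requeue, cancel or otherwise dispose of @work */
	local_irq_restore(flags);
	return true;
}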
1646 
1647 /**
1648  * insert_work - insert a work into a pool
1649  * @pwq: pwq @work belongs to
1650  * @work: work to insert
1651  * @head: insertion point
1652  * @extra_flags: extra WORK_STRUCT_* flags to set
1653  *
1654  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1655  * work_struct flags.
1656  *
1657  * CONTEXT:
1658  * raw_spin_lock_irq(pool->lock).
1659  */
1660 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1661 			struct list_head *head, unsigned int extra_flags)
1662 {
1663 	debug_work_activate(work);
1664 
1665 	/* record the work call stack in order to print it in KASAN reports */
1666 	kasan_record_aux_stack_noalloc(work);
1667 
1668 	/* we own @work, set data and link */
1669 	set_work_pwq(work, pwq, extra_flags);
1670 	list_add_tail(&work->entry, head);
1671 	get_pwq(pwq);
1672 }
1673 
1674 /*
1675  * Test whether @work is being queued from another work executing on the
1676  * same workqueue.
1677  */
1678 static bool is_chained_work(struct workqueue_struct *wq)
1679 {
1680 	struct worker *worker;
1681 
1682 	worker = current_wq_worker();
1683 	/*
1684 	 * Return %true iff I'm a worker executing a work item on @wq.  If
1685 	 * I'm @worker, it's safe to dereference it without locking.
1686 	 */
1687 	return worker && worker->current_pwq->wq == wq;
1688 }
1689 
1690 /*
1691  * When queueing an unbound work item to a wq, prefer local CPU if allowed
1692  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1693  * avoid perturbing sensitive tasks.
1694  */
1695 static int wq_select_unbound_cpu(int cpu)
1696 {
1697 	int new_cpu;
1698 
1699 	if (likely(!wq_debug_force_rr_cpu)) {
1700 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1701 			return cpu;
1702 	} else {
1703 		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
1704 	}
1705 
1706 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
1707 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1708 	if (unlikely(new_cpu >= nr_cpu_ids)) {
1709 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1710 		if (unlikely(new_cpu >= nr_cpu_ids))
1711 			return cpu;
1712 	}
1713 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
1714 
1715 	return new_cpu;
1716 }
1717 
1718 static void __queue_work(int cpu, struct workqueue_struct *wq,
1719 			 struct work_struct *work)
1720 {
1721 	struct pool_workqueue *pwq;
1722 	struct worker_pool *last_pool, *pool;
1723 	unsigned int work_flags;
1724 	unsigned int req_cpu = cpu;
1725 
1726 	/*
1727 	 * While a work item is PENDING && off queue, a task trying to
1728 	 * steal the PENDING will busy-loop waiting for it to either get
1729 	 * queued or lose PENDING.  Grabbing PENDING and queueing should
1730 	 * happen with IRQ disabled.
1731 	 */
1732 	lockdep_assert_irqs_disabled();
1733 
1734 
1735 	/*
1736 	 * For a draining wq, only works from the same workqueue are
1737 	 * allowed. The __WQ_DESTROYING flag helps catch the case where a
1738 	 * new work item is queued to a wq after destroy_workqueue(wq).
1739 	 */
1740 	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
1741 		     WARN_ON_ONCE(!is_chained_work(wq))))
1742 		return;
1743 	rcu_read_lock();
1744 retry:
1745 	/* pwq which will be used unless @work is executing elsewhere */
1746 	if (req_cpu == WORK_CPU_UNBOUND) {
1747 		if (wq->flags & WQ_UNBOUND)
1748 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1749 		else
1750 			cpu = raw_smp_processor_id();
1751 	}
1752 
1753 	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
1754 	pool = pwq->pool;
1755 
1756 	/*
1757 	 * If @work was previously on a different pool, it might still be
1758 	 * running there, in which case the work needs to be queued on that
1759 	 * pool to guarantee non-reentrancy.
1760 	 */
1761 	last_pool = get_work_pool(work);
1762 	if (last_pool && last_pool != pool) {
1763 		struct worker *worker;
1764 
1765 		raw_spin_lock(&last_pool->lock);
1766 
1767 		worker = find_worker_executing_work(last_pool, work);
1768 
1769 		if (worker && worker->current_pwq->wq == wq) {
1770 			pwq = worker->current_pwq;
1771 			pool = pwq->pool;
1772 			WARN_ON_ONCE(pool != last_pool);
1773 		} else {
1774 			/* meh... not running there, queue here */
1775 			raw_spin_unlock(&last_pool->lock);
1776 			raw_spin_lock(&pool->lock);
1777 		}
1778 	} else {
1779 		raw_spin_lock(&pool->lock);
1780 	}
1781 
1782 	/*
1783 	 * pwq is determined and locked. For unbound pools, we could have raced
1784 	 * with pwq release and it could already be dead. If its refcnt is zero,
1785 	 * repeat pwq selection. Note that unbound pwqs never die without
1786 	 * another pwq replacing it in cpu_pwq or while work items are executing
1787 	 * on it, so the retrying is guaranteed to make forward-progress.
1788 	 */
1789 	if (unlikely(!pwq->refcnt)) {
1790 		if (wq->flags & WQ_UNBOUND) {
1791 			raw_spin_unlock(&pool->lock);
1792 			cpu_relax();
1793 			goto retry;
1794 		}
1795 		/* oops */
1796 		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1797 			  wq->name, cpu);
1798 	}
1799 
1800 	/* pwq determined, queue */
1801 	trace_workqueue_queue_work(req_cpu, pwq, work);
1802 
1803 	if (WARN_ON(!list_empty(&work->entry)))
1804 		goto out;
1805 
1806 	pwq->nr_in_flight[pwq->work_color]++;
1807 	work_flags = work_color_to_flags(pwq->work_color);
1808 
1809 	if (likely(pwq->nr_active < pwq->max_active)) {
1810 		if (list_empty(&pool->worklist))
1811 			pool->watchdog_ts = jiffies;
1812 
1813 		trace_workqueue_activate_work(work);
1814 		pwq->nr_active++;
1815 		insert_work(pwq, work, &pool->worklist, work_flags);
1816 		kick_pool(pool);
1817 	} else {
1818 		work_flags |= WORK_STRUCT_INACTIVE;
1819 		insert_work(pwq, work, &pwq->inactive_works, work_flags);
1820 	}
1821 
1822 out:
1823 	raw_spin_unlock(&pool->lock);
1824 	rcu_read_unlock();
1825 }
1826 
1827 /**
1828  * queue_work_on - queue work on specific cpu
1829  * @cpu: CPU number to execute work on
1830  * @wq: workqueue to use
1831  * @work: work to queue
1832  *
1833  * We queue the work to a specific CPU, the caller must ensure it
1834  * can't go away.  Callers that fail to ensure that the specified
1835  * CPU cannot go away will execute on a randomly chosen CPU.
1836  * But note well that callers specifying a CPU that never has been
1837  * online will get a splat.
1838  *
1839  * Return: %false if @work was already on a queue, %true otherwise.
1840  */
1841 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1842 		   struct work_struct *work)
1843 {
1844 	bool ret = false;
1845 	unsigned long flags;
1846 
1847 	local_irq_save(flags);
1848 
1849 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1850 		__queue_work(cpu, wq, work);
1851 		ret = true;
1852 	}
1853 
1854 	local_irq_restore(flags);
1855 	return ret;
1856 }
1857 EXPORT_SYMBOL(queue_work_on);
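/*
 * Usage sketch (illustrative, not part of workqueue.c): queueing a work
 * item on a specific CPU from a hypothetical driver.  All example_* names
 * are made up; assumes <linux/workqueue.h> is available as in any module.
 */
static void example_fn(struct work_struct *work)
{
	pr_info("example work running on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_fn);

static void example_queue_on_cpu0(void)
{
	/* %false means example_work was already pending somewhere */
	if (!queue_work_on(0, system_wq, &example_work))
		pr_debug("example_work already queued\n");
}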
1858 
1859 /**
1860  * select_numa_node_cpu - Select a CPU based on NUMA node
1861  * @node: NUMA node ID that we want to select a CPU from
1862  *
1863  * This function will attempt to find a "random" cpu available on a given
1864  * node. If there are no CPUs available on the given node it will return
1865  * WORK_CPU_UNBOUND indicating that we should just schedule to any
1866  * available CPU if we need to schedule this work.
1867  */
1868 static int select_numa_node_cpu(int node)
1869 {
1870 	int cpu;
1871 
1872 	/* Delay binding to CPU if node is not valid or online */
1873 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1874 		return WORK_CPU_UNBOUND;
1875 
1876 	/* Use local node/cpu if we are already there */
1877 	cpu = raw_smp_processor_id();
1878 	if (node == cpu_to_node(cpu))
1879 		return cpu;
1880 
1881 	/* Use "random", otherwise known as the first online CPU of the node */
1882 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1883 
1884 	/* If CPU is valid return that, otherwise just defer */
1885 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1886 }
1887 
1888 /**
1889  * queue_work_node - queue work on a "random" cpu for a given NUMA node
1890  * @node: NUMA node that we are targeting the work for
1891  * @wq: workqueue to use
1892  * @work: work to queue
1893  *
1894  * We queue the work to a "random" CPU within a given NUMA node. The basic
1895  * idea here is to provide a way to somehow associate work with a given
1896  * NUMA node.
1897  *
1898  * This function will only make a best effort attempt at getting this onto
1899  * the right NUMA node. If no node is requested or the requested node is
1900  * offline then we just fall back to standard queue_work behavior.
1901  *
1902  * Currently the "random" CPU ends up being the first available CPU in the
1903  * intersection of cpu_online_mask and the cpumask of the node, unless we
1904  * are running on the node. In that case we just use the current CPU.
1905  *
1906  * Return: %false if @work was already on a queue, %true otherwise.
1907  */
1908 bool queue_work_node(int node, struct workqueue_struct *wq,
1909 		     struct work_struct *work)
1910 {
1911 	unsigned long flags;
1912 	bool ret = false;
1913 
1914 	/*
1915 	 * This current implementation is specific to unbound workqueues.
1916 	 * Specifically we only return the first available CPU for a given
1917 	 * node instead of cycling through individual CPUs within the node.
1918 	 *
1919 	 * If this is used with a per-cpu workqueue then the logic in
1920 	 * workqueue_select_cpu_near would need to be updated to allow for
1921 	 * some round robin type logic.
1922 	 */
1923 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1924 
1925 	local_irq_save(flags);
1926 
1927 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1928 		int cpu = select_numa_node_cpu(node);
1929 
1930 		__queue_work(cpu, wq, work);
1931 		ret = true;
1932 	}
1933 
1934 	local_irq_restore(flags);
1935 	return ret;
1936 }
1937 EXPORT_SYMBOL_GPL(queue_work_node);
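/*
 * Usage sketch (illustrative): preferring a NUMA node for an unbound work
 * item, reusing example_fn from the sketch above.  Per the WARN_ON_ONCE()
 * in queue_work_node(), the workqueue must be unbound, e.g.
 * system_unbound_wq.
 */
static DECLARE_WORK(example_node_work, example_fn);

static void example_queue_near_node(int nid)
{
	queue_work_node(nid, system_unbound_wq, &example_node_work);
}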
1938 
1939 void delayed_work_timer_fn(struct timer_list *t)
1940 {
1941 	struct delayed_work *dwork = from_timer(dwork, t, timer);
1942 
1943 	/* should have been called from irqsafe timer with irq already off */
1944 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
1945 }
1946 EXPORT_SYMBOL(delayed_work_timer_fn);
1947 
1948 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1949 				struct delayed_work *dwork, unsigned long delay)
1950 {
1951 	struct timer_list *timer = &dwork->timer;
1952 	struct work_struct *work = &dwork->work;
1953 
1954 	WARN_ON_ONCE(!wq);
1955 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1956 	WARN_ON_ONCE(timer_pending(timer));
1957 	WARN_ON_ONCE(!list_empty(&work->entry));
1958 
1959 	/*
1960 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1961 	 * both optimization and correctness.  The earliest @timer can
1962 	 * expire is on the closest next tick and delayed_work users depend
1963 	 * on that there's no such delay when @delay is 0.
1964 	 */
1965 	if (!delay) {
1966 		__queue_work(cpu, wq, &dwork->work);
1967 		return;
1968 	}
1969 
1970 	dwork->wq = wq;
1971 	dwork->cpu = cpu;
1972 	timer->expires = jiffies + delay;
1973 
1974 	if (unlikely(cpu != WORK_CPU_UNBOUND))
1975 		add_timer_on(timer, cpu);
1976 	else
1977 		add_timer(timer);
1978 }
1979 
1980 /**
1981  * queue_delayed_work_on - queue work on specific CPU after delay
1982  * @cpu: CPU number to execute work on
1983  * @wq: workqueue to use
1984  * @dwork: work to queue
1985  * @delay: number of jiffies to wait before queueing
1986  *
1987  * Return: %false if @work was already on a queue, %true otherwise.  If
1988  * @delay is zero and @dwork is idle, it will be scheduled for immediate
1989  * execution.
1990  */
1991 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1992 			   struct delayed_work *dwork, unsigned long delay)
1993 {
1994 	struct work_struct *work = &dwork->work;
1995 	bool ret = false;
1996 	unsigned long flags;
1997 
1998 	/* read the comment in __queue_work() */
1999 	local_irq_save(flags);
2000 
2001 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2002 		__queue_delayed_work(cpu, wq, dwork, delay);
2003 		ret = true;
2004 	}
2005 
2006 	local_irq_restore(flags);
2007 	return ret;
2008 }
2009 EXPORT_SYMBOL(queue_delayed_work_on);
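/*
 * Usage sketch (illustrative): a delayed work item.  DECLARE_DELAYED_WORK()
 * points dwork->timer at delayed_work_timer_fn(), which is what the
 * WARN_ON_ONCE() checks in __queue_delayed_work() expect.  Names are
 * hypothetical.
 */
static void example_dwork_fn(struct work_struct *work)
{
	pr_info("example delayed work ran\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_queue_delayed(void)
{
	/* run on CPU 1 roughly one second from now; %false if still pending */
	queue_delayed_work_on(1, system_wq, &example_dwork,
			      msecs_to_jiffies(1000));
}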
2010 
2011 /**
2012  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2013  * @cpu: CPU number to execute work on
2014  * @wq: workqueue to use
2015  * @dwork: work to queue
2016  * @delay: number of jiffies to wait before queueing
2017  *
2018  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2019  * modify @dwork's timer so that it expires after @delay.  If @delay is
2020  * zero, @work is guaranteed to be scheduled immediately regardless of its
2021  * current state.
2022  *
2023  * Return: %false if @dwork was idle and queued, %true if @dwork was
2024  * pending and its timer was modified.
2025  *
2026  * This function is safe to call from any context including IRQ handler.
2027  * See try_to_grab_pending() for details.
2028  */
2029 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2030 			 struct delayed_work *dwork, unsigned long delay)
2031 {
2032 	unsigned long flags;
2033 	int ret;
2034 
2035 	do {
2036 		ret = try_to_grab_pending(&dwork->work, true, &flags);
2037 	} while (unlikely(ret == -EAGAIN));
2038 
2039 	if (likely(ret >= 0)) {
2040 		__queue_delayed_work(cpu, wq, dwork, delay);
2041 		local_irq_restore(flags);
2042 	}
2043 
2044 	/* -ENOENT from try_to_grab_pending() becomes %true */
2045 	return ret;
2046 }
2047 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
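/*
 * Usage sketch (illustrative): debouncing with mod_delayed_work(), the
 * WORK_CPU_UNBOUND wrapper around mod_delayed_work_on() above.  Each call
 * pushes the deadline back, so example_dwork_fn() from the sketch above
 * runs once, roughly 100ms after the last event.
 */
static void example_debounce_event(void)
{
	mod_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(100));
}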
2048 
2049 static void rcu_work_rcufn(struct rcu_head *rcu)
2050 {
2051 	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2052 
2053 	/* read the comment in __queue_work() */
2054 	local_irq_disable();
2055 	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2056 	local_irq_enable();
2057 }
2058 
2059 /**
2060  * queue_rcu_work - queue work after a RCU grace period
2061  * @wq: workqueue to use
2062  * @rwork: work to queue
2063  *
2064  * Return: %false if @rwork was already pending, %true otherwise.  Note
2065  * that a full RCU grace period is guaranteed only after a %true return.
2066  * While @rwork is guaranteed to be executed after a %false return, the
2067  * execution may happen before a full RCU grace period has passed.
2068  */
2069 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2070 {
2071 	struct work_struct *work = &rwork->work;
2072 
2073 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2074 		rwork->wq = wq;
2075 		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2076 		return true;
2077 	}
2078 
2079 	return false;
2080 }
2081 EXPORT_SYMBOL(queue_rcu_work);
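/*
 * Usage sketch (illustrative): freeing an object after an RCU grace period
 * without being in atomic context when kfree() runs.  struct example_obj
 * and its helpers are hypothetical; assume the object was kmalloc()ed.
 */
struct example_obj {
	struct rcu_work rwork;
	/* ... payload ... */
};

static void example_obj_free_fn(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(to_rcu_work(work), struct example_obj, rwork);

	kfree(obj);
}

static void example_obj_free(struct example_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, example_obj_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}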
2082 
2083 static struct worker *alloc_worker(int node)
2084 {
2085 	struct worker *worker;
2086 
2087 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2088 	if (worker) {
2089 		INIT_LIST_HEAD(&worker->entry);
2090 		INIT_LIST_HEAD(&worker->scheduled);
2091 		INIT_LIST_HEAD(&worker->node);
2092 		/* on creation a worker is in !idle && prep state */
2093 		worker->flags = WORKER_PREP;
2094 	}
2095 	return worker;
2096 }
2097 
2098 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2099 {
2100 	if (pool->cpu < 0 && pool->attrs->affn_strict)
2101 		return pool->attrs->__pod_cpumask;
2102 	else
2103 		return pool->attrs->cpumask;
2104 }
2105 
2106 /**
2107  * worker_attach_to_pool() - attach a worker to a pool
2108  * @worker: worker to be attached
2109  * @pool: the target pool
2110  *
2111  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
2112  * cpu-binding of @worker are kept coordinated with the pool across
2113  * cpu-[un]hotplugs.
2114  */
2115 static void worker_attach_to_pool(struct worker *worker,
2116 				   struct worker_pool *pool)
2117 {
2118 	mutex_lock(&wq_pool_attach_mutex);
2119 
2120 	/*
2121 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2122 	 * stable across this function.  See the comments above the flag
2123 	 * definition for details.
2124 	 */
2125 	if (pool->flags & POOL_DISASSOCIATED)
2126 		worker->flags |= WORKER_UNBOUND;
2127 	else
2128 		kthread_set_per_cpu(worker->task, pool->cpu);
2129 
2130 	if (worker->rescue_wq)
2131 		set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2132 
2133 	list_add_tail(&worker->node, &pool->workers);
2134 	worker->pool = pool;
2135 
2136 	mutex_unlock(&wq_pool_attach_mutex);
2137 }
2138 
2139 /**
2140  * worker_detach_from_pool() - detach a worker from its pool
2141  * @worker: worker which is attached to its pool
2142  *
2143  * Undo the attaching which had been done in worker_attach_to_pool().  The
2144  * caller worker shouldn't access the pool after detaching unless it has
2145  * another reference to the pool.
2146  */
2147 static void worker_detach_from_pool(struct worker *worker)
2148 {
2149 	struct worker_pool *pool = worker->pool;
2150 	struct completion *detach_completion = NULL;
2151 
2152 	mutex_lock(&wq_pool_attach_mutex);
2153 
2154 	kthread_set_per_cpu(worker->task, -1);
2155 	list_del(&worker->node);
2156 	worker->pool = NULL;
2157 
2158 	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2159 		detach_completion = pool->detach_completion;
2160 	mutex_unlock(&wq_pool_attach_mutex);
2161 
2162 	/* clear leftover flags without pool->lock after it is detached */
2163 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2164 
2165 	if (detach_completion)
2166 		complete(detach_completion);
2167 }
2168 
2169 /**
2170  * create_worker - create a new workqueue worker
2171  * @pool: pool the new worker will belong to
2172  *
2173  * Create and start a new worker which is attached to @pool.
2174  *
2175  * CONTEXT:
2176  * Might sleep.  Does GFP_KERNEL allocations.
2177  *
2178  * Return:
2179  * Pointer to the newly created worker.
2180  */
2181 static struct worker *create_worker(struct worker_pool *pool)
2182 {
2183 	struct worker *worker;
2184 	int id;
2185 	char id_buf[23];
2186 
2187 	/* ID is needed to determine kthread name */
2188 	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2189 	if (id < 0) {
2190 		pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2191 			    ERR_PTR(id));
2192 		return NULL;
2193 	}
2194 
2195 	worker = alloc_worker(pool->node);
2196 	if (!worker) {
2197 		pr_err_once("workqueue: Failed to allocate a worker\n");
2198 		goto fail;
2199 	}
2200 
2201 	worker->id = id;
2202 
2203 	if (pool->cpu >= 0)
2204 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2205 			 pool->attrs->nice < 0  ? "H" : "");
2206 	else
2207 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2208 
2209 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2210 					      "kworker/%s", id_buf);
2211 	if (IS_ERR(worker->task)) {
2212 		if (PTR_ERR(worker->task) == -EINTR) {
2213 			pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2214 			       id_buf);
2215 		} else {
2216 			pr_err_once("workqueue: Failed to create a worker thread: %pe",
2217 				    worker->task);
2218 		}
2219 		goto fail;
2220 	}
2221 
2222 	set_user_nice(worker->task, pool->attrs->nice);
2223 	trace_android_rvh_create_worker(worker->task, pool->attrs);
2224 	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2225 
2226 	/* successful, attach the worker to the pool */
2227 	worker_attach_to_pool(worker, pool);
2228 
2229 	/* start the newly created worker */
2230 	raw_spin_lock_irq(&pool->lock);
2231 
2232 	worker->pool->nr_workers++;
2233 	worker_enter_idle(worker);
2234 	kick_pool(pool);
2235 
2236 	/*
2237 	 * @worker is waiting on a completion in kthread() and will trigger hung
2238 	 * check if not woken up soon. As kick_pool() might not have woken it
2239 	 * up, wake it up explicitly once more.
2240 	 */
2241 	wake_up_process(worker->task);
2242 
2243 	raw_spin_unlock_irq(&pool->lock);
2244 
2245 	return worker;
2246 
2247 fail:
2248 	ida_free(&pool->worker_ida, id);
2249 	kfree(worker);
2250 	return NULL;
2251 }
2252 
2253 static void unbind_worker(struct worker *worker)
2254 {
2255 	lockdep_assert_held(&wq_pool_attach_mutex);
2256 
2257 	kthread_set_per_cpu(worker->task, -1);
2258 	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2259 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2260 	else
2261 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2262 }
2263 
2264 static void wake_dying_workers(struct list_head *cull_list)
2265 {
2266 	struct worker *worker, *tmp;
2267 
2268 	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2269 		list_del_init(&worker->entry);
2270 		unbind_worker(worker);
2271 		/*
2272 		 * If the worker was somehow already running, then it had to be
2273 		 * in pool->idle_list when set_worker_dying() happened or we
2274 		 * wouldn't have gotten here.
2275 		 *
2276 		 * Thus, the worker must either have observed the WORKER_DIE
2277 		 * flag, or have set its state to TASK_IDLE. Either way, the
2278 		 * below will be observed by the worker and is safe to do
2279 		 * outside of pool->lock.
2280 		 */
2281 		wake_up_process(worker->task);
2282 	}
2283 }
2284 
2285 /**
2286  * set_worker_dying - Tag a worker for destruction
2287  * @worker: worker to be destroyed
2288  * @list: transfer worker away from its pool->idle_list and into list
2289  *
2290  * Tag @worker for destruction and adjust @pool stats accordingly.  The worker
2291  * should be idle.
2292  *
2293  * CONTEXT:
2294  * raw_spin_lock_irq(pool->lock).
2295  */
2296 static void set_worker_dying(struct worker *worker, struct list_head *list)
2297 {
2298 	struct worker_pool *pool = worker->pool;
2299 
2300 	lockdep_assert_held(&pool->lock);
2301 	lockdep_assert_held(&wq_pool_attach_mutex);
2302 
2303 	/* sanity check frenzy */
2304 	if (WARN_ON(worker->current_work) ||
2305 	    WARN_ON(!list_empty(&worker->scheduled)) ||
2306 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
2307 		return;
2308 
2309 	pool->nr_workers--;
2310 	pool->nr_idle--;
2311 
2312 	worker->flags |= WORKER_DIE;
2313 
2314 	list_move(&worker->entry, list);
2315 	list_move(&worker->node, &pool->dying_workers);
2316 }
2317 
2318 /**
2319  * idle_worker_timeout - check if some idle workers can now be deleted.
2320  * @t: The pool's idle_timer that just expired
2321  *
2322  * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2323  * worker_leave_idle(), as a worker flicking between idle and active while its
2324  * pool is at the too_many_workers() tipping point would cause too much timer
2325  * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2326  * it expire and re-evaluate things from there.
2327  */
2328 static void idle_worker_timeout(struct timer_list *t)
2329 {
2330 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
2331 	bool do_cull = false;
2332 
2333 	if (work_pending(&pool->idle_cull_work))
2334 		return;
2335 
2336 	raw_spin_lock_irq(&pool->lock);
2337 
2338 	if (too_many_workers(pool)) {
2339 		struct worker *worker;
2340 		unsigned long expires;
2341 
2342 		/* idle_list is kept in LIFO order, check the last one */
2343 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2344 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2345 		do_cull = !time_before(jiffies, expires);
2346 
2347 		if (!do_cull)
2348 			mod_timer(&pool->idle_timer, expires);
2349 	}
2350 	raw_spin_unlock_irq(&pool->lock);
2351 
2352 	if (do_cull)
2353 		queue_work(system_unbound_wq, &pool->idle_cull_work);
2354 }
2355 
2356 /**
2357  * idle_cull_fn - cull workers that have been idle for too long.
2358  * @work: the pool's work for handling these idle workers
2359  *
2360  * This goes through a pool's idle workers and gets rid of those that have been
2361  * idle for at least IDLE_WORKER_TIMEOUT seconds.
2362  *
2363  * We don't want to disturb isolated CPUs because of a pcpu kworker being
2364  * culled, so this also resets worker affinity. This requires a sleepable
2365  * context, hence the split between timer callback and work item.
2366  */
2367 static void idle_cull_fn(struct work_struct *work)
2368 {
2369 	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2370 	LIST_HEAD(cull_list);
2371 
2372 	/*
2373 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2374 	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2375 	 * path. This is required as a previously-preempted worker could run after
2376 	 * set_worker_dying() has happened but before wake_dying_workers() did.
2377 	 */
2378 	mutex_lock(&wq_pool_attach_mutex);
2379 	raw_spin_lock_irq(&pool->lock);
2380 
2381 	while (too_many_workers(pool)) {
2382 		struct worker *worker;
2383 		unsigned long expires;
2384 
2385 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2386 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2387 
2388 		if (time_before(jiffies, expires)) {
2389 			mod_timer(&pool->idle_timer, expires);
2390 			break;
2391 		}
2392 
2393 		set_worker_dying(worker, &cull_list);
2394 	}
2395 
2396 	raw_spin_unlock_irq(&pool->lock);
2397 	wake_dying_workers(&cull_list);
2398 	mutex_unlock(&wq_pool_attach_mutex);
2399 }
2400 
2401 static void send_mayday(struct work_struct *work)
2402 {
2403 	struct pool_workqueue *pwq = get_work_pwq(work);
2404 	struct workqueue_struct *wq = pwq->wq;
2405 
2406 	lockdep_assert_held(&wq_mayday_lock);
2407 
2408 	if (!wq->rescuer)
2409 		return;
2410 
2411 	/* mayday mayday mayday */
2412 	if (list_empty(&pwq->mayday_node)) {
2413 		/*
2414 		 * If @pwq is for an unbound wq, its base ref may be put at
2415 		 * any time due to an attribute change.  Pin @pwq until the
2416 		 * rescuer is done with it.
2417 		 */
2418 		get_pwq(pwq);
2419 		list_add_tail(&pwq->mayday_node, &wq->maydays);
2420 		wake_up_process(wq->rescuer->task);
2421 		pwq->stats[PWQ_STAT_MAYDAY]++;
2422 	}
2423 }
2424 
2425 static void pool_mayday_timeout(struct timer_list *t)
2426 {
2427 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2428 	struct work_struct *work;
2429 
2430 	raw_spin_lock_irq(&pool->lock);
2431 	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
2432 
2433 	if (need_to_create_worker(pool)) {
2434 		/*
2435 		 * We've been trying to create a new worker but
2436 		 * haven't been successful.  We might be hitting an
2437 		 * allocation deadlock.  Send distress signals to
2438 		 * rescuers.
2439 		 */
2440 		list_for_each_entry(work, &pool->worklist, entry)
2441 			send_mayday(work);
2442 	}
2443 
2444 	raw_spin_unlock(&wq_mayday_lock);
2445 	raw_spin_unlock_irq(&pool->lock);
2446 
2447 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2448 }
2449 
2450 /**
2451  * maybe_create_worker - create a new worker if necessary
2452  * @pool: pool to create a new worker for
2453  *
2454  * Create a new worker for @pool if necessary.  @pool is guaranteed to
2455  * have at least one idle worker on return from this function.  If
2456  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2457  * sent to all rescuers with works scheduled on @pool to resolve
2458  * possible allocation deadlock.
2459  *
2460  * On return, need_to_create_worker() is guaranteed to be %false and
2461  * may_start_working() %true.
2462  *
2463  * LOCKING:
2464  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2465  * multiple times.  Does GFP_KERNEL allocations.  Called only from
2466  * manager.
2467  */
2468 static void maybe_create_worker(struct worker_pool *pool)
2469 __releases(&pool->lock)
2470 __acquires(&pool->lock)
2471 {
2472 restart:
2473 	raw_spin_unlock_irq(&pool->lock);
2474 
2475 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2476 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2477 
2478 	while (true) {
2479 		if (create_worker(pool) || !need_to_create_worker(pool))
2480 			break;
2481 
2482 		schedule_timeout_interruptible(CREATE_COOLDOWN);
2483 
2484 		if (!need_to_create_worker(pool))
2485 			break;
2486 	}
2487 
2488 	del_timer_sync(&pool->mayday_timer);
2489 	raw_spin_lock_irq(&pool->lock);
2490 	/*
2491 	 * This is necessary even after a new worker was just successfully
2492 	 * created as @pool->lock was dropped and the new worker might have
2493 	 * already become busy.
2494 	 */
2495 	if (need_to_create_worker(pool))
2496 		goto restart;
2497 }
2498 
2499 /**
2500  * manage_workers - manage worker pool
2501  * @worker: self
2502  *
2503  * Assume the manager role and manage the worker pool @worker belongs
2504  * to.  At any given time, there can be only zero or one manager per
2505  * pool.  The exclusion is handled automatically by this function.
2506  *
2507  * The caller can safely start processing works on false return.  On
2508  * true return, it's guaranteed that need_to_create_worker() is false
2509  * and may_start_working() is true.
2510  *
2511  * CONTEXT:
2512  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2513  * multiple times.  Does GFP_KERNEL allocations.
2514  *
2515  * Return:
2516  * %false if the pool doesn't need management and the caller can safely
2517  * start processing works, %true if management function was performed and
2518  * the conditions that the caller verified before calling the function may
2519  * no longer be true.
2520  */
2521 static bool manage_workers(struct worker *worker)
2522 {
2523 	struct worker_pool *pool = worker->pool;
2524 
2525 	if (pool->flags & POOL_MANAGER_ACTIVE)
2526 		return false;
2527 
2528 	pool->flags |= POOL_MANAGER_ACTIVE;
2529 	pool->manager = worker;
2530 
2531 	maybe_create_worker(pool);
2532 
2533 	pool->manager = NULL;
2534 	pool->flags &= ~POOL_MANAGER_ACTIVE;
2535 	rcuwait_wake_up(&manager_wait);
2536 	return true;
2537 }
2538 
2539 /**
2540  * process_one_work - process single work
2541  * @worker: self
2542  * @work: work to process
2543  *
2544  * Process @work.  This function contains all the logic necessary to
2545  * process a single work including synchronization against and
2546  * interaction with other workers on the same cpu, queueing and
2547  * flushing.  As long as context requirement is met, any worker can
2548  * call this function to process a work.
2549  *
2550  * CONTEXT:
2551  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2552  */
2553 static void process_one_work(struct worker *worker, struct work_struct *work)
2554 __releases(&pool->lock)
2555 __acquires(&pool->lock)
2556 {
2557 	struct pool_workqueue *pwq = get_work_pwq(work);
2558 	struct worker_pool *pool = worker->pool;
2559 	unsigned long work_data;
2560 #ifdef CONFIG_LOCKDEP
2561 	/*
2562 	 * It is permissible to free the struct work_struct from
2563 	 * inside the function that is called from it, this we need to
2564 	 * take into account for lockdep too.  To avoid bogus "held
2565 	 * lock freed" warnings as well as problems when looking into
2566 	 * work->lockdep_map, make a copy and use that here.
2567 	 */
2568 	struct lockdep_map lockdep_map;
2569 
2570 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2571 #endif
2572 	/* ensure we're on the correct CPU */
2573 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2574 		     raw_smp_processor_id() != pool->cpu);
2575 
2576 	/* claim and dequeue */
2577 	debug_work_deactivate(work);
2578 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2579 	worker->current_work = work;
2580 	worker->current_func = work->func;
2581 	worker->current_pwq = pwq;
2582 	worker->current_at = worker->task->se.sum_exec_runtime;
2583 	work_data = *work_data_bits(work);
2584 	worker->current_color = get_work_color(work_data);
2585 
2586 	/*
2587 	 * Record wq name for cmdline and debug reporting, may get
2588 	 * overridden through set_worker_desc().
2589 	 */
2590 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2591 
2592 	list_del_init(&work->entry);
2593 
2594 	/*
2595 	 * CPU intensive works don't participate in concurrency management.
2596 	 * They're the scheduler's responsibility.  This takes @worker out
2597 	 * of concurrency management and the next code block will chain
2598 	 * execution of the pending work items.
2599 	 */
2600 	if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
2601 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2602 
2603 	/*
2604 	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
2605 	 * since nr_running would always be >= 1 at this point. This is used to
2606 	 * chain execution of the pending work items for WORKER_NOT_RUNNING
2607 	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
2608 	 */
2609 	kick_pool(pool);
2610 
2611 	/*
2612 	 * Record the last pool and clear PENDING which should be the last
2613 	 * update to @work.  Also, do this inside @pool->lock so that
2614 	 * PENDING and queued state changes happen together while IRQ is
2615 	 * disabled.
2616 	 */
2617 	set_work_pool_and_clear_pending(work, pool->id);
2618 
2619 	pwq->stats[PWQ_STAT_STARTED]++;
2620 	raw_spin_unlock_irq(&pool->lock);
2621 
2622 	lock_map_acquire(&pwq->wq->lockdep_map);
2623 	lock_map_acquire(&lockdep_map);
2624 	/*
2625 	 * Strictly speaking we should mark the invariant state without holding
2626 	 * any locks, that is, before these two lock_map_acquire()'s.
2627 	 *
2628 	 * However, that would result in:
2629 	 *
2630 	 *   A(W1)
2631 	 *   WFC(C)
2632 	 *		A(W1)
2633 	 *		C(C)
2634 	 *
2635 	 * Which would create W1->C->W1 dependencies, even though there is no
2636 	 * actual deadlock possible. There are two solutions, using a
2637 	 * read-recursive acquire on the work(queue) 'locks', but this will then
2638 	 * hit the lockdep limitation on recursive locks, or simply discard
2639 	 * these locks.
2640 	 *
2641 	 * AFAICT there is no possible deadlock scenario between the
2642 	 * flush_work() and complete() primitives (except for single-threaded
2643 	 * workqueues), so hiding them isn't a problem.
2644 	 */
2645 	lockdep_invariant_state(true);
2646 	trace_workqueue_execute_start(work);
2647 	worker->current_func(work);
2648 	/*
2649 	 * While we must be careful to not use "work" after this, the trace
2650 	 * point will only record its address.
2651 	 */
2652 	trace_workqueue_execute_end(work, worker->current_func);
2653 	pwq->stats[PWQ_STAT_COMPLETED]++;
2654 	lock_map_release(&lockdep_map);
2655 	lock_map_release(&pwq->wq->lockdep_map);
2656 
2657 	if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
2658 		     rcu_preempt_depth() > 0)) {
2659 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
2660 		       "     last function: %ps\n",
2661 		       current->comm, preempt_count(), rcu_preempt_depth(),
2662 		       task_pid_nr(current), worker->current_func);
2663 		debug_show_held_locks(current);
2664 		dump_stack();
2665 	}
2666 
2667 	/*
2668 	 * The following prevents a kworker from hogging CPU on !PREEMPTION
2669 	 * kernels, where a requeueing work item waiting for something to
2670 	 * happen could deadlock with stop_machine as such work item could
2671 	 * indefinitely requeue itself while all other CPUs are trapped in
2672 	 * stop_machine. At the same time, report a quiescent RCU state so
2673 	 * the same condition doesn't freeze RCU.
2674 	 */
2675 	cond_resched();
2676 
2677 	raw_spin_lock_irq(&pool->lock);
2678 
2679 	/*
2680 	 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
2681 	 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
2682 	 * wq_cpu_intensive_thresh_us. Clear it.
2683 	 */
2684 	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2685 
2686 	/* tag the worker for identification in schedule() */
2687 	worker->last_func = worker->current_func;
2688 
2689 	/* we're done with it, release */
2690 	hash_del(&worker->hentry);
2691 	worker->current_work = NULL;
2692 	worker->current_func = NULL;
2693 	worker->current_pwq = NULL;
2694 	worker->current_color = INT_MAX;
2695 	pwq_dec_nr_in_flight(pwq, work_data);
2696 }
2697 
2698 /**
2699  * process_scheduled_works - process scheduled works
2700  * @worker: self
2701  *
2702  * Process all scheduled works.  Please note that the scheduled list
2703  * may change while processing a work, so this function repeatedly
2704  * fetches a work from the top and executes it.
2705  *
2706  * CONTEXT:
2707  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2708  * multiple times.
2709  */
2710 static void process_scheduled_works(struct worker *worker)
2711 {
2712 	struct work_struct *work;
2713 	bool first = true;
2714 
2715 	while ((work = list_first_entry_or_null(&worker->scheduled,
2716 						struct work_struct, entry))) {
2717 		if (first) {
2718 			worker->pool->watchdog_ts = jiffies;
2719 			first = false;
2720 		}
2721 		process_one_work(worker, work);
2722 	}
2723 }
2724 
2725 static void set_pf_worker(bool val)
2726 {
2727 	mutex_lock(&wq_pool_attach_mutex);
2728 	if (val)
2729 		current->flags |= PF_WQ_WORKER;
2730 	else
2731 		current->flags &= ~PF_WQ_WORKER;
2732 	mutex_unlock(&wq_pool_attach_mutex);
2733 }
2734 
2735 /**
2736  * worker_thread - the worker thread function
2737  * @__worker: self
2738  *
2739  * The worker thread function.  All workers belong to a worker_pool -
2740  * either a per-cpu one or dynamic unbound one.  These workers process all
2741  * work items regardless of their specific target workqueue.  The only
2742  * exception is work items which belong to workqueues with a rescuer which
2743  * will be explained in rescuer_thread().
2744  *
2745  * Return: 0
2746  */
2747 static int worker_thread(void *__worker)
2748 {
2749 	struct worker *worker = __worker;
2750 	struct worker_pool *pool = worker->pool;
2751 
2752 	/* tell the scheduler that this is a workqueue worker */
2753 	set_pf_worker(true);
2754 woke_up:
2755 	raw_spin_lock_irq(&pool->lock);
2756 
2757 	/* am I supposed to die? */
2758 	if (unlikely(worker->flags & WORKER_DIE)) {
2759 		raw_spin_unlock_irq(&pool->lock);
2760 		set_pf_worker(false);
2761 
2762 		set_task_comm(worker->task, "kworker/dying");
2763 		ida_free(&pool->worker_ida, worker->id);
2764 		worker_detach_from_pool(worker);
2765 		WARN_ON_ONCE(!list_empty(&worker->entry));
2766 		kfree(worker);
2767 		return 0;
2768 	}
2769 
2770 	worker_leave_idle(worker);
2771 recheck:
2772 	/* no more worker necessary? */
2773 	if (!need_more_worker(pool))
2774 		goto sleep;
2775 
2776 	/* do we need to manage? */
2777 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2778 		goto recheck;
2779 
2780 	/*
2781 	 * ->scheduled list can only be filled while a worker is
2782 	 * preparing to process a work or actually processing it.
2783 	 * Make sure nobody diddled with it while I was sleeping.
2784 	 */
2785 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
2786 
2787 	/*
2788 	 * Finish PREP stage.  We're guaranteed to have at least one idle
2789 	 * worker or that someone else has already assumed the manager
2790 	 * role.  This is where @worker starts participating in concurrency
2791 	 * management if applicable and concurrency management is restored
2792 	 * after being rebound.  See rebind_workers() for details.
2793 	 */
2794 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2795 
2796 	do {
2797 		struct work_struct *work =
2798 			list_first_entry(&pool->worklist,
2799 					 struct work_struct, entry);
2800 
2801 		if (assign_work(work, worker, NULL))
2802 			process_scheduled_works(worker);
2803 	} while (keep_working(pool));
2804 
2805 	worker_set_flags(worker, WORKER_PREP);
2806 sleep:
2807 	/*
2808 	 * pool->lock is held and there's no work to process and no need to
2809 	 * manage, sleep.  Workers are woken up only while holding
2810 	 * pool->lock or from local cpu, so setting the current state
2811 	 * before releasing pool->lock is enough to prevent losing any
2812 	 * event.
2813 	 */
2814 	worker_enter_idle(worker);
2815 	__set_current_state(TASK_IDLE);
2816 	raw_spin_unlock_irq(&pool->lock);
2817 	schedule();
2818 	goto woke_up;
2819 }
2820 
2821 /**
2822  * rescuer_thread - the rescuer thread function
2823  * @__rescuer: self
2824  *
2825  * Workqueue rescuer thread function.  There's one rescuer for each
2826  * workqueue which has WQ_MEM_RECLAIM set.
2827  *
2828  * Regular work processing on a pool may block trying to create a new
2829  * worker, which uses a GFP_KERNEL allocation and has a slight chance of
2830  * developing into a deadlock if some work items currently on the same
2831  * queue need to be processed to satisfy that allocation.  This is
2832  * the problem rescuer solves.
2833  *
2834  * When such a condition is possible, the pool summons the rescuers of all
2835  * workqueues which have work items queued on the pool and lets them process
2836  * those work items so that forward progress can be guaranteed.
2837  *
2838  * This should happen rarely.
2839  *
2840  * Return: 0
2841  */
2842 static int rescuer_thread(void *__rescuer)
2843 {
2844 	struct worker *rescuer = __rescuer;
2845 	struct workqueue_struct *wq = rescuer->rescue_wq;
2846 	bool should_stop;
2847 
2848 	set_user_nice(current, RESCUER_NICE_LEVEL);
2849 
2850 	/*
2851 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2852 	 * doesn't participate in concurrency management.
2853 	 */
2854 	set_pf_worker(true);
2855 repeat:
2856 	set_current_state(TASK_IDLE);
2857 
2858 	/*
2859 	 * By the time the rescuer is requested to stop, the workqueue
2860 	 * shouldn't have any work pending, but @wq->maydays may still have
2861 	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
2862 	 * all the work items before the rescuer got to them.  Go through
2863 	 * @wq->maydays processing before acting on should_stop so that the
2864 	 * list is always empty on exit.
2865 	 */
2866 	should_stop = kthread_should_stop();
2867 
2868 	/* see whether any pwq is asking for help */
2869 	raw_spin_lock_irq(&wq_mayday_lock);
2870 
2871 	while (!list_empty(&wq->maydays)) {
2872 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2873 					struct pool_workqueue, mayday_node);
2874 		struct worker_pool *pool = pwq->pool;
2875 		struct work_struct *work, *n;
2876 
2877 		__set_current_state(TASK_RUNNING);
2878 		list_del_init(&pwq->mayday_node);
2879 
2880 		raw_spin_unlock_irq(&wq_mayday_lock);
2881 
2882 		worker_attach_to_pool(rescuer, pool);
2883 
2884 		raw_spin_lock_irq(&pool->lock);
2885 
2886 		/*
2887 		 * Slurp in all works issued via this workqueue and
2888 		 * process'em.
2889 		 */
2890 		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2891 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2892 			if (get_work_pwq(work) == pwq &&
2893 			    assign_work(work, rescuer, &n))
2894 				pwq->stats[PWQ_STAT_RESCUED]++;
2895 		}
2896 
2897 		if (!list_empty(&rescuer->scheduled)) {
2898 			process_scheduled_works(rescuer);
2899 
2900 			/*
2901 			 * The above execution of rescued work items could
2902 			 * have created more to rescue through
2903 			 * pwq_activate_first_inactive() or chained
2904 			 * queueing.  Let's put @pwq back on mayday list so
2905 			 * that such back-to-back work items, which may be
2906 			 * being used to relieve memory pressure, don't
2907 			 * incur MAYDAY_INTERVAL delay in between.
2908 			 */
2909 			if (pwq->nr_active && need_to_create_worker(pool)) {
2910 				raw_spin_lock(&wq_mayday_lock);
2911 				/*
2912 				 * Queue iff we aren't racing destruction
2913 				 * and somebody else hasn't queued it already.
2914 				 */
2915 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2916 					get_pwq(pwq);
2917 					list_add_tail(&pwq->mayday_node, &wq->maydays);
2918 				}
2919 				raw_spin_unlock(&wq_mayday_lock);
2920 			}
2921 		}
2922 
2923 		/*
2924 		 * Put the reference grabbed by send_mayday().  @pool won't
2925 		 * go away while we're still attached to it.
2926 		 */
2927 		put_pwq(pwq);
2928 
2929 		/*
2930 		 * Leave this pool. Notify regular workers; otherwise, we end up
2931 		 * with 0 concurrency and stalling the execution.
2932 		 */
2933 		kick_pool(pool);
2934 
2935 		raw_spin_unlock_irq(&pool->lock);
2936 
2937 		worker_detach_from_pool(rescuer);
2938 
2939 		raw_spin_lock_irq(&wq_mayday_lock);
2940 	}
2941 
2942 	raw_spin_unlock_irq(&wq_mayday_lock);
2943 
2944 	if (should_stop) {
2945 		__set_current_state(TASK_RUNNING);
2946 		set_pf_worker(false);
2947 		return 0;
2948 	}
2949 
2950 	/* rescuers should never participate in concurrency management */
2951 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2952 	schedule();
2953 	goto repeat;
2954 }
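/*
 * Usage sketch (illustrative): a workqueue used on the memory-reclaim path
 * must be allocated with WQ_MEM_RECLAIM so that a rescuer thread like the
 * one above exists to guarantee forward progress.  "example_reclaim" and
 * example_init() are hypothetical names.
 */
static struct workqueue_struct *example_reclaim_wq;

static int example_init(void)
{
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 0);
	if (!example_reclaim_wq)
		return -ENOMEM;
	return 0;
}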
2955 
2956 /**
2957  * check_flush_dependency - check for flush dependency sanity
2958  * @target_wq: workqueue being flushed
2959  * @target_work: work item being flushed (NULL for workqueue flushes)
2960  *
2961  * %current is trying to flush the whole @target_wq or @target_work on it.
2962  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2963  * reclaiming memory or running on a workqueue which doesn't have
2964  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2965  * a deadlock.
2966  */
2967 static void check_flush_dependency(struct workqueue_struct *target_wq,
2968 				   struct work_struct *target_work)
2969 {
2970 	work_func_t target_func = target_work ? target_work->func : NULL;
2971 	struct worker *worker;
2972 
2973 	if (target_wq->flags & WQ_MEM_RECLAIM)
2974 		return;
2975 
2976 	worker = current_wq_worker();
2977 
2978 	WARN_ONCE(current->flags & PF_MEMALLOC,
2979 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2980 		  current->pid, current->comm, target_wq->name, target_func);
2981 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2982 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2983 		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2984 		  worker->current_pwq->wq->name, worker->current_func,
2985 		  target_wq->name, target_func);
2986 }
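/*
 * Illustrative sketch of the dependency check_flush_dependency() is meant
 * to catch: a work item running on a WQ_MEM_RECLAIM workqueue flushing a
 * !WQ_MEM_RECLAIM one can deadlock under memory pressure, because the
 * flushed workqueue may need a new worker (a GFP_KERNEL allocation) to
 * make progress.  Names are hypothetical; example_reclaim_wq is from the
 * sketch above.
 */
static void example_reclaim_work_fn(struct work_struct *work)
{
	/*
	 * BAD if this work item was queued on example_reclaim_wq
	 * (WQ_MEM_RECLAIM): system_wq has no rescuer, so this is the kind
	 * of flush the WARN_ONCE() above complains about.
	 */
	flush_workqueue(system_wq);
}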
2987 
2988 struct wq_barrier {
2989 	struct work_struct	work;
2990 	struct completion	done;
2991 	struct task_struct	*task;	/* purely informational */
2992 };
2993 
2994 static void wq_barrier_func(struct work_struct *work)
2995 {
2996 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2997 	complete(&barr->done);
2998 }
2999 
3000 /**
3001  * insert_wq_barrier - insert a barrier work
3002  * @pwq: pwq to insert barrier into
3003  * @barr: wq_barrier to insert
3004  * @target: target work to attach @barr to
3005  * @worker: worker currently executing @target, NULL if @target is not executing
3006  *
3007  * @barr is linked to @target such that @barr is completed only after
3008  * @target finishes execution.  Please note that the ordering
3009  * guarantee is observed only with respect to @target and on the local
3010  * cpu.
3011  *
3012  * Currently, a queued barrier can't be canceled.  This is because
3013  * try_to_grab_pending() can't determine whether the work to be
3014  * grabbed is at the head of the queue and thus can't clear LINKED
3015  * flag of the previous work while there must be a valid next work
3016  * after a work with LINKED flag set.
3017  *
3018  * Note that when @worker is non-NULL, @target may be modified
3019  * underneath us, so we can't reliably determine pwq from @target.
3020  *
3021  * CONTEXT:
3022  * raw_spin_lock_irq(pool->lock).
3023  */
3024 static void insert_wq_barrier(struct pool_workqueue *pwq,
3025 			      struct wq_barrier *barr,
3026 			      struct work_struct *target, struct worker *worker)
3027 {
3028 	unsigned int work_flags = 0;
3029 	unsigned int work_color;
3030 	struct list_head *head;
3031 
3032 	/*
3033 	 * debugobject calls are safe here even with pool->lock locked
3034 	 * as we know for sure that this will not trigger any of the
3035 	 * checks and call back into the fixup functions where we
3036 	 * might deadlock.
3037 	 */
3038 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
3039 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3040 
3041 	init_completion_map(&barr->done, &target->lockdep_map);
3042 
3043 	barr->task = current;
3044 
3045 	/* The barrier work item does not participate in pwq->nr_active. */
3046 	work_flags |= WORK_STRUCT_INACTIVE;
3047 
3048 	/*
3049 	 * If @target is currently being executed, schedule the
3050 	 * barrier to the worker; otherwise, put it after @target.
3051 	 */
3052 	if (worker) {
3053 		head = worker->scheduled.next;
3054 		work_color = worker->current_color;
3055 	} else {
3056 		unsigned long *bits = work_data_bits(target);
3057 
3058 		head = target->entry.next;
3059 		/* there can already be other linked works, inherit and set */
3060 		work_flags |= *bits & WORK_STRUCT_LINKED;
3061 		work_color = get_work_color(*bits);
3062 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
3063 	}
3064 
3065 	pwq->nr_in_flight[work_color]++;
3066 	work_flags |= work_color_to_flags(work_color);
3067 
3068 	insert_work(pwq, &barr->work, head, work_flags);
3069 }
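/*
 * Illustrative sketch (not part of workqueue.c): how a flusher consumes a
 * wq_barrier.  The real logic lives in flush_work()/start_flush_work()
 * further down in this file; this simplified version only shows the
 * on-stack barrier + completion pattern that insert_wq_barrier() serves
 * (pwq pinning and error handling omitted).
 */
static void example_wait_for_work(struct pool_workqueue *pwq,
				  struct work_struct *target,
				  struct worker *worker)
{
	struct wq_barrier barr;

	/* caller holds pwq->pool->lock, as insert_wq_barrier() requires */
	insert_wq_barrier(pwq, &barr, target, worker);
	raw_spin_unlock_irq(&pwq->pool->lock);

	/* barr.work runs only after @target, so this waits for @target */
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
}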
3070 
3071 /**
3072  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3073  * @wq: workqueue being flushed
3074  * @flush_color: new flush color, < 0 for no-op
3075  * @work_color: new work color, < 0 for no-op
3076  *
3077  * Prepare pwqs for workqueue flushing.
3078  *
3079  * If @flush_color is non-negative, flush_color on all pwqs should be
3080  * -1.  If no pwq has in-flight commands at the specified color, all
3081  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
3082  * has in flight commands, its pwq->flush_color is set to
3083  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
3084  * wakeup logic is armed and %true is returned.
3085  *
3086  * The caller should have initialized @wq->first_flusher prior to
3087  * calling this function with non-negative @flush_color.  If
3088  * @flush_color is negative, no flush color update is done and %false
3089  * is returned.
3090  *
3091  * If @work_color is non-negative, all pwqs should have the same
3092  * work_color which is previous to @work_color and all will be
3093  * advanced to @work_color.
3094  *
3095  * CONTEXT:
3096  * mutex_lock(wq->mutex).
3097  *
3098  * Return:
3099  * %true if @flush_color >= 0 and there's something to flush.  %false
3100  * otherwise.
3101  */
3102 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3103 				      int flush_color, int work_color)
3104 {
3105 	bool wait = false;
3106 	struct pool_workqueue *pwq;
3107 
3108 	if (flush_color >= 0) {
3109 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3110 		atomic_set(&wq->nr_pwqs_to_flush, 1);
3111 	}
3112 
3113 	for_each_pwq(pwq, wq) {
3114 		struct worker_pool *pool = pwq->pool;
3115 
3116 		raw_spin_lock_irq(&pool->lock);
3117 
3118 		if (flush_color >= 0) {
3119 			WARN_ON_ONCE(pwq->flush_color != -1);
3120 
3121 			if (pwq->nr_in_flight[flush_color]) {
3122 				pwq->flush_color = flush_color;
3123 				atomic_inc(&wq->nr_pwqs_to_flush);
3124 				wait = true;
3125 			}
3126 		}
3127 
3128 		if (work_color >= 0) {
3129 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3130 			pwq->work_color = work_color;
3131 		}
3132 
3133 		raw_spin_unlock_irq(&pool->lock);
3134 	}
3135 
3136 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3137 		complete(&wq->first_flusher->done);
3138 
3139 	return wait;
3140 }
3141 
3142 /**
3143  * __flush_workqueue - ensure that any scheduled work has run to completion.
3144  * @wq: workqueue to flush
3145  *
3146  * This function sleeps until all work items which were queued on entry
3147  * have finished execution, but it is not livelocked by new incoming ones.
3148  */
3149 void __flush_workqueue(struct workqueue_struct *wq)
3150 {
3151 	struct wq_flusher this_flusher = {
3152 		.list = LIST_HEAD_INIT(this_flusher.list),
3153 		.flush_color = -1,
3154 		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3155 	};
3156 	int next_color;
3157 
3158 	if (WARN_ON(!wq_online))
3159 		return;
3160 
3161 	lock_map_acquire(&wq->lockdep_map);
3162 	lock_map_release(&wq->lockdep_map);
3163 
3164 	mutex_lock(&wq->mutex);
3165 
3166 	/*
3167 	 * Start-to-wait phase
3168 	 */
3169 	next_color = work_next_color(wq->work_color);
3170 
3171 	if (next_color != wq->flush_color) {
3172 		/*
3173 		 * Color space is not full.  The current work_color
3174 		 * becomes our flush_color and work_color is advanced
3175 		 * by one.
3176 		 */
3177 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3178 		this_flusher.flush_color = wq->work_color;
3179 		wq->work_color = next_color;
3180 
3181 		if (!wq->first_flusher) {
3182 			/* no flush in progress, become the first flusher */
3183 			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3184 
3185 			wq->first_flusher = &this_flusher;
3186 
3187 			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3188 						       wq->work_color)) {
3189 				/* nothing to flush, done */
3190 				wq->flush_color = next_color;
3191 				wq->first_flusher = NULL;
3192 				goto out_unlock;
3193 			}
3194 		} else {
3195 			/* wait in queue */
3196 			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3197 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
3198 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3199 		}
3200 	} else {
3201 		/*
3202 		 * Oops, color space is full, wait on overflow queue.
3203 		 * The next flush completion will assign us
3204 		 * flush_color and transfer to flusher_queue.
3205 		 */
3206 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3207 	}
3208 
3209 	check_flush_dependency(wq, NULL);
3210 
3211 	mutex_unlock(&wq->mutex);
3212 
3213 	wait_for_completion(&this_flusher.done);
3214 
3215 	/*
3216 	 * Wake-up-and-cascade phase
3217 	 *
3218 	 * First flushers are responsible for cascading flushes and
3219 	 * handling overflow.  Non-first flushers can simply return.
3220 	 */
3221 	if (READ_ONCE(wq->first_flusher) != &this_flusher)
3222 		return;
3223 
3224 	mutex_lock(&wq->mutex);
3225 
3226 	/* we might have raced, check again with mutex held */
3227 	if (wq->first_flusher != &this_flusher)
3228 		goto out_unlock;
3229 
3230 	WRITE_ONCE(wq->first_flusher, NULL);
3231 
3232 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
3233 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3234 
3235 	while (true) {
3236 		struct wq_flusher *next, *tmp;
3237 
3238 		/* complete all the flushers sharing the current flush color */
3239 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3240 			if (next->flush_color != wq->flush_color)
3241 				break;
3242 			list_del_init(&next->list);
3243 			complete(&next->done);
3244 		}
3245 
3246 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3247 			     wq->flush_color != work_next_color(wq->work_color));
3248 
3249 		/* this flush_color is finished, advance by one */
3250 		wq->flush_color = work_next_color(wq->flush_color);
3251 
3252 		/* one color has been freed, handle overflow queue */
3253 		if (!list_empty(&wq->flusher_overflow)) {
3254 			/*
3255 			 * Assign the same color to all overflowed
3256 			 * flushers, advance work_color and append to
3257 			 * flusher_queue.  This is the start-to-wait
3258 			 * phase for these overflowed flushers.
3259 			 */
3260 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
3261 				tmp->flush_color = wq->work_color;
3262 
3263 			wq->work_color = work_next_color(wq->work_color);
3264 
3265 			list_splice_tail_init(&wq->flusher_overflow,
3266 					      &wq->flusher_queue);
3267 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3268 		}
3269 
3270 		if (list_empty(&wq->flusher_queue)) {
3271 			WARN_ON_ONCE(wq->flush_color != wq->work_color);
3272 			break;
3273 		}
3274 
3275 		/*
3276 		 * Need to flush more colors.  Make the next flusher
3277 		 * the new first flusher and arm pwqs.
3278 		 */
3279 		WARN_ON_ONCE(wq->flush_color == wq->work_color);
3280 		WARN_ON_ONCE(wq->flush_color != next->flush_color);
3281 
3282 		list_del_init(&next->list);
3283 		wq->first_flusher = next;
3284 
3285 		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
3286 			break;
3287 
3288 		/*
3289 		 * Meh... this color is already done, clear first
3290 		 * flusher and repeat cascading.
3291 		 */
3292 		wq->first_flusher = NULL;
3293 	}
3294 
3295 out_unlock:
3296 	mutex_unlock(&wq->mutex);
3297 }
3298 EXPORT_SYMBOL(__flush_workqueue);
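
/*
 * Example (illustrative sketch): callers normally go through the
 * flush_workqueue() wrapper from <linux/workqueue.h>, which ends up in
 * __flush_workqueue().  A driver waiting for everything it has queued so
 * far on its private workqueue might do (foo_* names are hypothetical):
 *
 *	queue_work(foo_wq, &foo->work);
 *	...
 *	flush_workqueue(foo_wq);	// all work queued above has now run
 */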
3299 
3300 /**
3301  * drain_workqueue - drain a workqueue
3302  * @wq: workqueue to drain
3303  *
3304  * Wait until the workqueue becomes empty.  While draining is in progress,
3305  * only chain queueing is allowed.  IOW, only currently pending or running
3306  * work items on @wq can queue further work items on it.  @wq is flushed
3307  * repeatedly until it becomes empty.  The number of flushing is determined
3308  * repeatedly until it becomes empty.  The number of flushes is determined
3309  * takes too long.
3310  */
3311 void drain_workqueue(struct workqueue_struct *wq)
3312 {
3313 	unsigned int flush_cnt = 0;
3314 	struct pool_workqueue *pwq;
3315 
3316 	/*
3317 	 * __queue_work() needs to test whether there are drainers, is much
3318 	 * hotter than drain_workqueue() and already looks at @wq->flags.
3319 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
3320 	 */
3321 	mutex_lock(&wq->mutex);
3322 	if (!wq->nr_drainers++)
3323 		wq->flags |= __WQ_DRAINING;
3324 	mutex_unlock(&wq->mutex);
3325 reflush:
3326 	__flush_workqueue(wq);
3327 
3328 	mutex_lock(&wq->mutex);
3329 
3330 	for_each_pwq(pwq, wq) {
3331 		bool drained;
3332 
3333 		raw_spin_lock_irq(&pwq->pool->lock);
3334 		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
3335 		raw_spin_unlock_irq(&pwq->pool->lock);
3336 
3337 		if (drained)
3338 			continue;
3339 
3340 		if (++flush_cnt == 10 ||
3341 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3342 			pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3343 				wq->name, __func__, flush_cnt);
3344 
3345 		mutex_unlock(&wq->mutex);
3346 		goto reflush;
3347 	}
3348 
3349 	if (!--wq->nr_drainers)
3350 		wq->flags &= ~__WQ_DRAINING;
3351 	mutex_unlock(&wq->mutex);
3352 }
3353 EXPORT_SYMBOL_GPL(drain_workqueue);
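
/*
 * Example (illustrative sketch, hypothetical foo_* names): draining is
 * meant for workqueues whose work items may re-queue themselves; only such
 * chained queueing is allowed while the drain is in progress.
 *
 *	// foo_work_fn() may call queue_work(foo_wq, &foo->work) again
 *	drain_workqueue(foo_wq);
 *	destroy_workqueue(foo_wq);
 *
 * destroy_workqueue() already drains internally, so the explicit call is
 * only needed when the workqueue must be quiesced without destroying it.
 */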
3354 
3355 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3356 			     bool from_cancel)
3357 {
3358 	struct worker *worker = NULL;
3359 	struct worker_pool *pool;
3360 	struct pool_workqueue *pwq;
3361 
3362 	might_sleep();
3363 
3364 	rcu_read_lock();
3365 	pool = get_work_pool(work);
3366 	if (!pool) {
3367 		rcu_read_unlock();
3368 		return false;
3369 	}
3370 
3371 	raw_spin_lock_irq(&pool->lock);
3372 	/* see the comment in try_to_grab_pending() with the same code */
3373 	pwq = get_work_pwq(work);
3374 	if (pwq) {
3375 		if (unlikely(pwq->pool != pool))
3376 			goto already_gone;
3377 	} else {
3378 		worker = find_worker_executing_work(pool, work);
3379 		if (!worker)
3380 			goto already_gone;
3381 		pwq = worker->current_pwq;
3382 	}
3383 
3384 	check_flush_dependency(pwq->wq, work);
3385 
3386 	insert_wq_barrier(pwq, barr, work, worker);
3387 	raw_spin_unlock_irq(&pool->lock);
3388 
3389 	/*
3390 	 * Force a lock recursion deadlock when using flush_work() inside a
3391 	 * single-threaded or rescuer equipped workqueue.
3392 	 *
3393 	 * For single threaded workqueues the deadlock happens when the work
3394 	 * is after the work issuing the flush_work(). For rescuer equipped
3395 	 * workqueues the deadlock happens when the rescuer stalls, blocking
3396 	 * forward progress.
3397 	 */
3398 	if (!from_cancel &&
3399 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3400 		lock_map_acquire(&pwq->wq->lockdep_map);
3401 		lock_map_release(&pwq->wq->lockdep_map);
3402 	}
3403 	rcu_read_unlock();
3404 	return true;
3405 already_gone:
3406 	raw_spin_unlock_irq(&pool->lock);
3407 	rcu_read_unlock();
3408 	return false;
3409 }
3410 
3411 static bool __flush_work(struct work_struct *work, bool from_cancel)
3412 {
3413 	struct wq_barrier barr;
3414 
3415 	if (WARN_ON(!wq_online))
3416 		return false;
3417 
3418 	if (WARN_ON(!work->func))
3419 		return false;
3420 
3421 	lock_map_acquire(&work->lockdep_map);
3422 	lock_map_release(&work->lockdep_map);
3423 
3424 	if (start_flush_work(work, &barr, from_cancel)) {
3425 		wait_for_completion(&barr.done);
3426 		destroy_work_on_stack(&barr.work);
3427 		return true;
3428 	} else {
3429 		return false;
3430 	}
3431 }
3432 
3433 /**
3434  * flush_work - wait for a work to finish executing the last queueing instance
3435  * @work: the work to flush
3436  *
3437  * Wait until @work has finished execution.  @work is guaranteed to be idle
3438  * on return if it hasn't been requeued since flush started.
3439  *
3440  * Return:
3441  * %true if flush_work() waited for the work to finish execution,
3442  * %false if it was already idle.
3443  */
3444 bool flush_work(struct work_struct *work)
3445 {
3446 	return __flush_work(work, false);
3447 }
3448 EXPORT_SYMBOL_GPL(flush_work);
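
/*
 * Example (illustrative sketch, hypothetical foo_* names): queue a work
 * item and later flush it so the handler is guaranteed to have finished
 * before dependent state is torn down.
 *
 *	static void foo_handler(struct work_struct *work)
 *	{
 *		// ... process foo events ...
 *	}
 *
 *	static DECLARE_WORK(foo_work, foo_handler);
 *
 *	schedule_work(&foo_work);
 *	...
 *	flush_work(&foo_work);		// last queueing instance has finished
 */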
3449 
3450 struct cwt_wait {
3451 	wait_queue_entry_t		wait;
3452 	struct work_struct	*work;
3453 };
3454 
3455 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3456 {
3457 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3458 
3459 	if (cwait->work != key)
3460 		return 0;
3461 	return autoremove_wake_function(wait, mode, sync, key);
3462 }
3463 
3464 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3465 {
3466 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3467 	unsigned long flags;
3468 	int ret;
3469 
3470 	do {
3471 		ret = try_to_grab_pending(work, is_dwork, &flags);
3472 		/*
3473 		 * If someone else is already canceling, wait for it to
3474 		 * finish.  flush_work() doesn't work for PREEMPT_NONE
3475 		 * because we may get scheduled between @work's completion
3476 		 * and the other canceling task resuming and clearing
3477 		 * CANCELING - flush_work() will return false immediately
3478 		 * as @work is no longer busy, try_to_grab_pending() will
3479 		 * return -ENOENT as @work is still being canceled and the
3480 		 * other canceling task won't be able to clear CANCELING as
3481 		 * we're hogging the CPU.
3482 		 *
3483 		 * Let's wait for completion using a waitqueue.  As this
3484 		 * may lead to the thundering herd problem, use a custom
3485 		 * wake function which matches @work along with exclusive
3486 		 * wait and wakeup.
3487 		 */
3488 		if (unlikely(ret == -ENOENT)) {
3489 			struct cwt_wait cwait;
3490 
3491 			init_wait(&cwait.wait);
3492 			cwait.wait.func = cwt_wakefn;
3493 			cwait.work = work;
3494 
3495 			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3496 						  TASK_UNINTERRUPTIBLE);
3497 			if (work_is_canceling(work))
3498 				schedule();
3499 			finish_wait(&cancel_waitq, &cwait.wait);
3500 		}
3501 	} while (unlikely(ret < 0));
3502 
3503 	/* tell other tasks trying to grab @work to back off */
3504 	mark_work_canceling(work);
3505 	local_irq_restore(flags);
3506 
3507 	/*
3508 	 * This allows canceling during early boot.  We know that @work
3509 	 * isn't executing.
3510 	 */
3511 	if (wq_online)
3512 		__flush_work(work, true);
3513 
3514 	clear_work_data(work);
3515 
3516 	/*
3517 	 * Paired with prepare_to_wait() above so that either
3518 	 * waitqueue_active() is visible here or !work_is_canceling() is
3519 	 * visible there.
3520 	 */
3521 	smp_mb();
3522 	if (waitqueue_active(&cancel_waitq))
3523 		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3524 
3525 	return ret;
3526 }
3527 
3528 /**
3529  * cancel_work_sync - cancel a work and wait for it to finish
3530  * @work: the work to cancel
3531  *
3532  * Cancel @work and wait for its execution to finish.  This function
3533  * can be used even if the work re-queues itself or migrates to
3534  * another workqueue.  On return from this function, @work is
3535  * guaranteed to be not pending or executing on any CPU.
3536  *
3537  * cancel_work_sync(&delayed_work->work) must not be used for
3538  * delayed_work's.  Use cancel_delayed_work_sync() instead.
3539  *
3540  * The caller must ensure that the workqueue on which @work was last
3541  * queued can't be destroyed before this function returns.
3542  *
3543  * Return:
3544  * %true if @work was pending, %false otherwise.
3545  */
3546 bool cancel_work_sync(struct work_struct *work)
3547 {
3548 	return __cancel_work_timer(work, false);
3549 }
3550 EXPORT_SYMBOL_GPL(cancel_work_sync);
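
/*
 * Example (illustrative sketch, hypothetical foo_* names): a device removal
 * path cancels an interrupt-scheduled work item so the handler cannot run
 * against freed state.
 *
 *	static void foo_remove(struct foo_device *foo)
 *	{
 *		disable_irq(foo->irq);			// stop new queueing
 *		cancel_work_sync(&foo->event_work);	// idle on all CPUs now
 *		kfree(foo);
 *	}
 */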
3551 
3552 /**
3553  * flush_delayed_work - wait for a dwork to finish executing the last queueing
3554  * @dwork: the delayed work to flush
3555  *
3556  * Delayed timer is cancelled and the pending work is queued for
3557  * immediate execution.  Like flush_work(), this function only
3558  * considers the last queueing instance of @dwork.
3559  *
3560  * Return:
3561  * %true if flush_work() waited for the work to finish execution,
3562  * %false if it was already idle.
3563  */
3564 bool flush_delayed_work(struct delayed_work *dwork)
3565 {
3566 	local_irq_disable();
3567 	if (del_timer_sync(&dwork->timer))
3568 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
3569 	local_irq_enable();
3570 	return flush_work(&dwork->work);
3571 }
3572 EXPORT_SYMBOL(flush_delayed_work);
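
/*
 * Example (illustrative sketch, hypothetical foo_* names): force a deferred
 * writeback to run immediately instead of waiting for its timer.
 *
 *	INIT_DELAYED_WORK(&foo->flush_dwork, foo_writeback);
 *	queue_delayed_work(system_wq, &foo->flush_dwork, 5 * HZ);
 *	...
 *	flush_delayed_work(&foo->flush_dwork);	// writeback has run on return
 */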
3573 
3574 /**
3575  * flush_rcu_work - wait for a rwork to finish executing the last queueing
3576  * @rwork: the rcu work to flush
3577  *
3578  * Return:
3579  * %true if flush_rcu_work() waited for the work to finish execution,
3580  * %false if it was already idle.
3581  */
3582 bool flush_rcu_work(struct rcu_work *rwork)
3583 {
3584 	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3585 		rcu_barrier();
3586 		flush_work(&rwork->work);
3587 		return true;
3588 	} else {
3589 		return flush_work(&rwork->work);
3590 	}
3591 }
3592 EXPORT_SYMBOL(flush_rcu_work);
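
/*
 * Example (illustrative sketch, hypothetical foo_* names): an rcu_work is
 * queued with queue_rcu_work() and becomes pending on the workqueue only
 * after a grace period; flush_rcu_work() waits for both stages.
 *
 *	INIT_RCU_WORK(&foo->rwork, foo_reclaim_fn);
 *	queue_rcu_work(system_wq, &foo->rwork);
 *	...
 *	flush_rcu_work(&foo->rwork);
 */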
3593 
3594 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3595 {
3596 	unsigned long flags;
3597 	int ret;
3598 
3599 	do {
3600 		ret = try_to_grab_pending(work, is_dwork, &flags);
3601 	} while (unlikely(ret == -EAGAIN));
3602 
3603 	if (unlikely(ret < 0))
3604 		return false;
3605 
3606 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3607 	local_irq_restore(flags);
3608 	return ret;
3609 }
3610 
3611 /*
3612  * See cancel_delayed_work()
3613  */
3614 bool cancel_work(struct work_struct *work)
3615 {
3616 	return __cancel_work(work, false);
3617 }
3618 EXPORT_SYMBOL(cancel_work);
3619 
3620 /**
3621  * cancel_delayed_work - cancel a delayed work
3622  * @dwork: delayed_work to cancel
3623  *
3624  * Kill off a pending delayed_work.
3625  *
3626  * Return: %true if @dwork was pending and canceled; %false if it wasn't
3627  * pending.
3628  *
3629  * Note:
3630  * The work callback function may still be running on return, unless
3631  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3632  * use cancel_delayed_work_sync() to wait on it.
3633  *
3634  * This function is safe to call from any context including IRQ handler.
3635  */
3636 bool cancel_delayed_work(struct delayed_work *dwork)
3637 {
3638 	return __cancel_work(&dwork->work, true);
3639 }
3640 EXPORT_SYMBOL(cancel_delayed_work);
3641 
3642 /**
3643  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3644  * @dwork: the delayed work to cancel
3645  *
3646  * This is cancel_work_sync() for delayed works.
3647  *
3648  * Return:
3649  * %true if @dwork was pending, %false otherwise.
3650  */
3651 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3652 {
3653 	return __cancel_work_timer(&dwork->work, true);
3654 }
3655 EXPORT_SYMBOL(cancel_delayed_work_sync);
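
/*
 * Example (illustrative sketch, hypothetical foo_* names): a polling work
 * item that re-arms itself from its own handler is stopped with the sync
 * variant so neither the timer nor the handler remains active afterwards.
 *
 *	static void foo_poll(struct work_struct *work)
 *	{
 *		struct foo_device *foo = container_of(to_delayed_work(work),
 *						      struct foo_device,
 *						      poll_dwork);
 *
 *		foo_sample(foo);
 *		schedule_delayed_work(&foo->poll_dwork, HZ);	// re-arm
 *	}
 *
 *	static void foo_stop(struct foo_device *foo)
 *	{
 *		cancel_delayed_work_sync(&foo->poll_dwork);
 *	}
 */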
3656 
3657 /**
3658  * schedule_on_each_cpu - execute a function synchronously on each online CPU
3659  * @func: the function to call
3660  *
3661  * schedule_on_each_cpu() executes @func on each online CPU using the
3662  * system workqueue and blocks until all CPUs have completed.
3663  * schedule_on_each_cpu() is very slow.
3664  *
3665  * Return:
3666  * 0 on success, -errno on failure.
3667  */
3668 int schedule_on_each_cpu(work_func_t func)
3669 {
3670 	int cpu;
3671 	struct work_struct __percpu *works;
3672 
3673 	works = alloc_percpu(struct work_struct);
3674 	if (!works)
3675 		return -ENOMEM;
3676 
3677 	cpus_read_lock();
3678 
3679 	for_each_online_cpu(cpu) {
3680 		struct work_struct *work = per_cpu_ptr(works, cpu);
3681 
3682 		INIT_WORK(work, func);
3683 		schedule_work_on(cpu, work);
3684 	}
3685 
3686 	for_each_online_cpu(cpu)
3687 		flush_work(per_cpu_ptr(works, cpu));
3688 
3689 	cpus_read_unlock();
3690 	free_percpu(works);
3691 	return 0;
3692 }
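
/*
 * Example (illustrative sketch, hypothetical foo_* names): the callback has
 * the regular work_func_t signature and runs once on every online CPU; the
 * caller blocks until all invocations have completed.
 *
 *	static void foo_drain_cpu_cache(struct work_struct *unused)
 *	{
 *		foo_drain_this_cpu();		// runs on each online CPU
 *	}
 *
 *	int ret = schedule_on_each_cpu(foo_drain_cpu_cache);
 */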
3693 
3694 /**
3695  * execute_in_process_context - reliably execute the routine with user context
3696  * @fn:		the function to execute
3697  * @ew:		guaranteed storage for the execute work structure (must
3698  *		be available when the work executes)
3699  *
3700  * Executes the function immediately if process context is available,
3701  * otherwise schedules the function for delayed execution.
3702  *
3703  * Return:	0 - function was executed
3704  *		1 - function was scheduled for execution
3705  */
3706 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3707 {
3708 	if (!in_interrupt()) {
3709 		fn(&ew->work);
3710 		return 0;
3711 	}
3712 
3713 	INIT_WORK(&ew->work, fn);
3714 	schedule_work(&ew->work);
3715 
3716 	return 1;
3717 }
3718 EXPORT_SYMBOL_GPL(execute_in_process_context);
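
/*
 * Example (illustrative sketch, hypothetical foo_* names): @ew must stay
 * valid across a possible deferred execution, so it is typically embedded
 * in a longer-lived object rather than placed on the stack.
 *
 *	struct foo_device {
 *		struct execute_work	release_ew;
 *		...
 *	};
 *
 *	static void foo_release(struct work_struct *work)
 *	{
 *		struct foo_device *foo =
 *			container_of(work, struct foo_device, release_ew.work);
 *
 *		kfree(foo);
 *	}
 *
 *	// runs foo_release() inline unless called from interrupt context
 *	execute_in_process_context(foo_release, &foo->release_ew);
 */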
3719 
3720 /**
3721  * free_workqueue_attrs - free a workqueue_attrs
3722  * @attrs: workqueue_attrs to free
3723  *
3724  * Undo alloc_workqueue_attrs().
3725  */
3726 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3727 {
3728 	if (attrs) {
3729 		free_cpumask_var(attrs->cpumask);
3730 		free_cpumask_var(attrs->__pod_cpumask);
3731 		kfree(attrs);
3732 	}
3733 }
3734 EXPORT_SYMBOL_GPL(free_workqueue_attrs);
3735 
3736 /**
3737  * alloc_workqueue_attrs - allocate a workqueue_attrs
3738  *
3739  * Allocate a new workqueue_attrs, initialize with default settings and
3740  * return it.
3741  *
3742  * Return: The allocated new workqueue_attr on success. %NULL on failure.
3743  */
3744 struct workqueue_attrs *alloc_workqueue_attrs(void)
3745 {
3746 	struct workqueue_attrs *attrs;
3747 
3748 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3749 	if (!attrs)
3750 		goto fail;
3751 	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3752 		goto fail;
3753 	if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
3754 		goto fail;
3755 
3756 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
3757 	attrs->affn_scope = WQ_AFFN_DFL;
3758 	return attrs;
3759 fail:
3760 	free_workqueue_attrs(attrs);
3761 	return NULL;
3762 }
3763 EXPORT_SYMBOL_GPL(alloc_workqueue_attrs);
3764 
3765 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3766 				 const struct workqueue_attrs *from)
3767 {
3768 	to->nice = from->nice;
3769 	cpumask_copy(to->cpumask, from->cpumask);
3770 	cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
3771 	to->affn_strict = from->affn_strict;
3772 
3773 	/*
3774 	 * Unlike hash and equality test, copying shouldn't ignore wq-only
3775 	 * fields as copying is used for both pool and wq attrs. Instead,
3776 	 * get_unbound_pool() explicitly clears the fields.
3777 	 */
3778 	to->affn_scope = from->affn_scope;
3779 	to->ordered = from->ordered;
3780 }
3781 
3782 /*
3783  * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
3784  * comments in 'struct workqueue_attrs' definition.
3785  */
3786 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
3787 {
3788 	attrs->affn_scope = WQ_AFFN_NR_TYPES;
3789 	attrs->ordered = false;
3790 }
3791 
3792 /* hash value of the content of @attr */
3793 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3794 {
3795 	u32 hash = 0;
3796 
3797 	hash = jhash_1word(attrs->nice, hash);
3798 	hash = jhash(cpumask_bits(attrs->cpumask),
3799 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3800 	hash = jhash(cpumask_bits(attrs->__pod_cpumask),
3801 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3802 	hash = jhash_1word(attrs->affn_strict, hash);
3803 	return hash;
3804 }
3805 
3806 /* content equality test */
3807 static bool wqattrs_equal(const struct workqueue_attrs *a,
3808 			  const struct workqueue_attrs *b)
3809 {
3810 	if (a->nice != b->nice)
3811 		return false;
3812 	if (!cpumask_equal(a->cpumask, b->cpumask))
3813 		return false;
3814 	if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
3815 		return false;
3816 	if (a->affn_strict != b->affn_strict)
3817 		return false;
3818 	return true;
3819 }
3820 
3821 /* Update @attrs with actually available CPUs */
3822 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
3823 				      const cpumask_t *unbound_cpumask)
3824 {
3825 	/*
3826 	 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
3827 	 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
3828 	 * @unbound_cpumask.
3829 	 */
3830 	cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
3831 	if (unlikely(cpumask_empty(attrs->cpumask)))
3832 		cpumask_copy(attrs->cpumask, unbound_cpumask);
3833 }
3834 
3835 /* find wq_pod_type to use for @attrs */
3836 static const struct wq_pod_type *
3837 wqattrs_pod_type(const struct workqueue_attrs *attrs)
3838 {
3839 	enum wq_affn_scope scope;
3840 	struct wq_pod_type *pt;
3841 
3842 	/* to synchronize access to wq_affn_dfl */
3843 	lockdep_assert_held(&wq_pool_mutex);
3844 
3845 	if (attrs->affn_scope == WQ_AFFN_DFL)
3846 		scope = wq_affn_dfl;
3847 	else
3848 		scope = attrs->affn_scope;
3849 
3850 	pt = &wq_pod_types[scope];
3851 
3852 	if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
3853 	    likely(pt->nr_pods))
3854 		return pt;
3855 
3856 	/*
3857 	 * Before workqueue_init_topology(), only SYSTEM is available which is
3858 	 * initialized in workqueue_init_early().
3859 	 */
3860 	pt = &wq_pod_types[WQ_AFFN_SYSTEM];
3861 	BUG_ON(!pt->nr_pods);
3862 	return pt;
3863 }
3864 
3865 /**
3866  * init_worker_pool - initialize a newly zalloc'd worker_pool
3867  * @pool: worker_pool to initialize
3868  *
3869  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3870  *
3871  * Return: 0 on success, -errno on failure.  Even on failure, all fields
3872  * inside @pool proper are initialized and put_unbound_pool() can be called
3873  * on @pool safely to release it.
3874  */
3875 static int init_worker_pool(struct worker_pool *pool)
3876 {
3877 	raw_spin_lock_init(&pool->lock);
3878 	pool->id = -1;
3879 	pool->cpu = -1;
3880 	pool->node = NUMA_NO_NODE;
3881 	pool->flags |= POOL_DISASSOCIATED;
3882 	pool->watchdog_ts = jiffies;
3883 	INIT_LIST_HEAD(&pool->worklist);
3884 	INIT_LIST_HEAD(&pool->idle_list);
3885 	hash_init(pool->busy_hash);
3886 
3887 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3888 	INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
3889 
3890 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3891 
3892 	INIT_LIST_HEAD(&pool->workers);
3893 	INIT_LIST_HEAD(&pool->dying_workers);
3894 
3895 	ida_init(&pool->worker_ida);
3896 	INIT_HLIST_NODE(&pool->hash_node);
3897 	pool->refcnt = 1;
3898 
3899 	/* shouldn't fail above this point */
3900 	pool->attrs = alloc_workqueue_attrs();
3901 	if (!pool->attrs)
3902 		return -ENOMEM;
3903 
3904 	wqattrs_clear_for_pool(pool->attrs);
3905 
3906 	return 0;
3907 }
3908 
3909 #ifdef CONFIG_LOCKDEP
3910 static void wq_init_lockdep(struct workqueue_struct *wq)
3911 {
3912 	char *lock_name;
3913 
3914 	lockdep_register_key(&wq->key);
3915 	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3916 	if (!lock_name)
3917 		lock_name = wq->name;
3918 
3919 	wq->lock_name = lock_name;
3920 	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3921 }
3922 
3923 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3924 {
3925 	lockdep_unregister_key(&wq->key);
3926 }
3927 
3928 static void wq_free_lockdep(struct workqueue_struct *wq)
3929 {
3930 	if (wq->lock_name != wq->name)
3931 		kfree(wq->lock_name);
3932 }
3933 #else
3934 static void wq_init_lockdep(struct workqueue_struct *wq)
3935 {
3936 }
3937 
3938 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3939 {
3940 }
3941 
3942 static void wq_free_lockdep(struct workqueue_struct *wq)
3943 {
3944 }
3945 #endif
3946 
3947 static void rcu_free_wq(struct rcu_head *rcu)
3948 {
3949 	struct workqueue_struct *wq =
3950 		container_of(rcu, struct workqueue_struct, rcu);
3951 
3952 	wq_free_lockdep(wq);
3953 	free_percpu(wq->cpu_pwq);
3954 	free_workqueue_attrs(wq->unbound_attrs);
3955 	kfree(wq);
3956 }
3957 
3958 static void rcu_free_pool(struct rcu_head *rcu)
3959 {
3960 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3961 
3962 	ida_destroy(&pool->worker_ida);
3963 	free_workqueue_attrs(pool->attrs);
3964 	kfree(pool);
3965 }
3966 
3967 /**
3968  * put_unbound_pool - put a worker_pool
3969  * @pool: worker_pool to put
3970  *
3971  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
3972  * safe manner.  get_unbound_pool() calls this function on its failure path
3973  * and this function should be able to release pools which went through,
3974  * successfully or not, init_worker_pool().
3975  *
3976  * Should be called with wq_pool_mutex held.
3977  */
3978 static void put_unbound_pool(struct worker_pool *pool)
3979 {
3980 	DECLARE_COMPLETION_ONSTACK(detach_completion);
3981 	struct worker *worker;
3982 	LIST_HEAD(cull_list);
3983 
3984 	lockdep_assert_held(&wq_pool_mutex);
3985 
3986 	if (--pool->refcnt)
3987 		return;
3988 
3989 	/* sanity checks */
3990 	if (WARN_ON(!(pool->cpu < 0)) ||
3991 	    WARN_ON(!list_empty(&pool->worklist)))
3992 		return;
3993 
3994 	/* release id and unhash */
3995 	if (pool->id >= 0)
3996 		idr_remove(&worker_pool_idr, pool->id);
3997 	hash_del(&pool->hash_node);
3998 
3999 	/*
4000 	 * Become the manager and destroy all workers.  This prevents
4001 	 * @pool's workers from blocking on attach_mutex.  We're the last
4002 	 * manager and @pool gets freed with the flag set.
4003 	 *
4004 	 * Having a concurrent manager is quite unlikely to happen as we can
4005 	 * only get here with
4006 	 *   pwq->refcnt == pool->refcnt == 0
4007 	 * which implies no work queued to the pool, which implies no worker can
4008 	 * become the manager. However a worker could have taken the role of
4009 	 * manager before the refcnts dropped to 0, since maybe_create_worker()
4010 	 * drops pool->lock
4011 	 */
4012 	while (true) {
4013 		rcuwait_wait_event(&manager_wait,
4014 				   !(pool->flags & POOL_MANAGER_ACTIVE),
4015 				   TASK_UNINTERRUPTIBLE);
4016 
4017 		mutex_lock(&wq_pool_attach_mutex);
4018 		raw_spin_lock_irq(&pool->lock);
4019 		if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4020 			pool->flags |= POOL_MANAGER_ACTIVE;
4021 			break;
4022 		}
4023 		raw_spin_unlock_irq(&pool->lock);
4024 		mutex_unlock(&wq_pool_attach_mutex);
4025 	}
4026 
4027 	while ((worker = first_idle_worker(pool)))
4028 		set_worker_dying(worker, &cull_list);
4029 	WARN_ON(pool->nr_workers || pool->nr_idle);
4030 	raw_spin_unlock_irq(&pool->lock);
4031 
4032 	wake_dying_workers(&cull_list);
4033 
4034 	if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
4035 		pool->detach_completion = &detach_completion;
4036 	mutex_unlock(&wq_pool_attach_mutex);
4037 
4038 	if (pool->detach_completion)
4039 		wait_for_completion(pool->detach_completion);
4040 
4041 	/* shut down the timers */
4042 	del_timer_sync(&pool->idle_timer);
4043 	cancel_work_sync(&pool->idle_cull_work);
4044 	del_timer_sync(&pool->mayday_timer);
4045 
4046 	/* RCU protected to allow dereferences from get_work_pool() */
4047 	call_rcu(&pool->rcu, rcu_free_pool);
4048 }
4049 
4050 /**
4051  * get_unbound_pool - get a worker_pool with the specified attributes
4052  * @attrs: the attributes of the worker_pool to get
4053  *
4054  * Obtain a worker_pool which has the same attributes as @attrs, bump the
4055  * reference count and return it.  If there already is a matching
4056  * worker_pool, it will be used; otherwise, this function attempts to
4057  * create a new one.
4058  *
4059  * Should be called with wq_pool_mutex held.
4060  *
4061  * Return: On success, a worker_pool with the same attributes as @attrs.
4062  * On failure, %NULL.
4063  */
4064 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4065 {
4066 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
4067 	u32 hash = wqattrs_hash(attrs);
4068 	struct worker_pool *pool;
4069 	int pod, node = NUMA_NO_NODE;
4070 
4071 	lockdep_assert_held(&wq_pool_mutex);
4072 
4073 	/* do we already have a matching pool? */
4074 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4075 		if (wqattrs_equal(pool->attrs, attrs)) {
4076 			pool->refcnt++;
4077 			return pool;
4078 		}
4079 	}
4080 
4081 	/* If __pod_cpumask is contained inside a NUMA pod, that's our node */
4082 	for (pod = 0; pod < pt->nr_pods; pod++) {
4083 		if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
4084 			node = pt->pod_node[pod];
4085 			break;
4086 		}
4087 	}
4088 
4089 	/* nope, create a new one */
4090 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
4091 	if (!pool || init_worker_pool(pool) < 0)
4092 		goto fail;
4093 
4094 	pool->node = node;
4095 	copy_workqueue_attrs(pool->attrs, attrs);
4096 	wqattrs_clear_for_pool(pool->attrs);
4097 
4098 	if (worker_pool_assign_id(pool) < 0)
4099 		goto fail;
4100 
4101 	/* create and start the initial worker */
4102 	if (wq_online && !create_worker(pool))
4103 		goto fail;
4104 
4105 	/* install */
4106 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
4107 
4108 	return pool;
4109 fail:
4110 	if (pool)
4111 		put_unbound_pool(pool);
4112 	return NULL;
4113 }
4114 
4115 static void rcu_free_pwq(struct rcu_head *rcu)
4116 {
4117 	kmem_cache_free(pwq_cache,
4118 			container_of(rcu, struct pool_workqueue, rcu));
4119 }
4120 
4121 /*
4122  * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
4123  * refcnt and needs to be destroyed.
4124  */
4125 static void pwq_release_workfn(struct kthread_work *work)
4126 {
4127 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
4128 						  release_work);
4129 	struct workqueue_struct *wq = pwq->wq;
4130 	struct worker_pool *pool = pwq->pool;
4131 	bool is_last = false;
4132 
4133 	/*
4134 	 * When @pwq is not linked, it doesn't hold any reference to the
4135 	 * @wq, and @wq is invalid to access.
4136 	 */
4137 	if (!list_empty(&pwq->pwqs_node)) {
4138 		mutex_lock(&wq->mutex);
4139 		list_del_rcu(&pwq->pwqs_node);
4140 		is_last = list_empty(&wq->pwqs);
4141 		mutex_unlock(&wq->mutex);
4142 	}
4143 
4144 	if (wq->flags & WQ_UNBOUND) {
4145 		mutex_lock(&wq_pool_mutex);
4146 		put_unbound_pool(pool);
4147 		mutex_unlock(&wq_pool_mutex);
4148 	}
4149 
4150 	call_rcu(&pwq->rcu, rcu_free_pwq);
4151 
4152 	/*
4153 	 * If we're the last pwq going away, @wq is already dead and no one
4154 	 * is gonna access it anymore.  Schedule RCU free.
4155 	 */
4156 	if (is_last) {
4157 		wq_unregister_lockdep(wq);
4158 		call_rcu(&wq->rcu, rcu_free_wq);
4159 	}
4160 }
4161 
4162 /**
4163  * pwq_adjust_max_active - update a pwq's max_active to the current setting
4164  * @pwq: target pool_workqueue
4165  *
4166  * If @pwq isn't freezing, set @pwq->max_active to the associated
4167  * workqueue's saved_max_active and activate inactive work items
4168  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
4169  */
4170 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
4171 {
4172 	struct workqueue_struct *wq = pwq->wq;
4173 	bool freezable = wq->flags & WQ_FREEZABLE;
4174 	unsigned long flags;
4175 
4176 	/* for @wq->saved_max_active */
4177 	lockdep_assert_held(&wq->mutex);
4178 
4179 	/* fast exit for non-freezable wqs */
4180 	if (!freezable && pwq->max_active == wq->saved_max_active)
4181 		return;
4182 
4183 	/* this function can be called during early boot w/ irq disabled */
4184 	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4185 
4186 	/*
4187 	 * During [un]freezing, the caller is responsible for ensuring that
4188 	 * this function is called at least once after @workqueue_freezing
4189 	 * is updated and visible.
4190 	 */
4191 	if (!freezable || !workqueue_freezing) {
4192 		pwq->max_active = wq->saved_max_active;
4193 
4194 		while (!list_empty(&pwq->inactive_works) &&
4195 		       pwq->nr_active < pwq->max_active)
4196 			pwq_activate_first_inactive(pwq);
4197 
4198 		kick_pool(pwq->pool);
4199 	} else {
4200 		pwq->max_active = 0;
4201 	}
4202 
4203 	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4204 }
4205 
4206 /* initialize newly allocated @pwq which is associated with @wq and @pool */
4207 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4208 		     struct worker_pool *pool)
4209 {
4210 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
4211 
4212 	memset(pwq, 0, sizeof(*pwq));
4213 
4214 	pwq->pool = pool;
4215 	pwq->wq = wq;
4216 	pwq->flush_color = -1;
4217 	pwq->refcnt = 1;
4218 	INIT_LIST_HEAD(&pwq->inactive_works);
4219 	INIT_LIST_HEAD(&pwq->pwqs_node);
4220 	INIT_LIST_HEAD(&pwq->mayday_node);
4221 	kthread_init_work(&pwq->release_work, pwq_release_workfn);
4222 }
4223 
4224 /* sync @pwq with the current state of its associated wq and link it */
4225 static void link_pwq(struct pool_workqueue *pwq)
4226 {
4227 	struct workqueue_struct *wq = pwq->wq;
4228 
4229 	lockdep_assert_held(&wq->mutex);
4230 
4231 	/* may be called multiple times, ignore if already linked */
4232 	if (!list_empty(&pwq->pwqs_node))
4233 		return;
4234 
4235 	/* set the matching work_color */
4236 	pwq->work_color = wq->work_color;
4237 
4238 	/* sync max_active to the current setting */
4239 	pwq_adjust_max_active(pwq);
4240 
4241 	/* link in @pwq */
4242 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
4243 }
4244 
4245 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
4246 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
4247 					const struct workqueue_attrs *attrs)
4248 {
4249 	struct worker_pool *pool;
4250 	struct pool_workqueue *pwq;
4251 
4252 	lockdep_assert_held(&wq_pool_mutex);
4253 
4254 	pool = get_unbound_pool(attrs);
4255 	if (!pool)
4256 		return NULL;
4257 
4258 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
4259 	if (!pwq) {
4260 		put_unbound_pool(pool);
4261 		return NULL;
4262 	}
4263 
4264 	init_pwq(pwq, wq, pool);
4265 	return pwq;
4266 }
4267 
4268 /**
4269  * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
4270  * @attrs: the wq_attrs of the default pwq of the target workqueue
4271  * @cpu: the target CPU
4272  * @cpu_going_down: if >= 0, the CPU to consider as offline
4273  *
4274  * Calculate the cpumask a workqueue with @attrs should use on @pod. If
4275  * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
4276  * The result is stored in @attrs->__pod_cpumask.
4277  *
4278  * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
4279  * and @pod has online CPUs requested by @attrs, the returned cpumask is the
4280  * intersection of the possible CPUs of @pod and @attrs->cpumask.
4281  *
4282  * The caller is responsible for ensuring that the cpumask of @pod stays stable.
4283  */
4284 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
4285 				int cpu_going_down)
4286 {
4287 	const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
4288 	int pod = pt->cpu_pod[cpu];
4289 
4290 	/* does @pod have any online CPUs @attrs wants? */
4291 	cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
4292 	cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
4293 	if (cpu_going_down >= 0)
4294 		cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
4295 
4296 	if (cpumask_empty(attrs->__pod_cpumask)) {
4297 		cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
4298 		return;
4299 	}
4300 
4301 	/* yeap, return possible CPUs in @pod that @attrs wants */
4302 	cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
4303 
4304 	if (cpumask_empty(attrs->__pod_cpumask))
4305 		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
4306 				"possible intersect\n");
4307 }
4308 
4309 /* install @pwq into @wq's cpu_pwq and return the old pwq */
4310 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
4311 					int cpu, struct pool_workqueue *pwq)
4312 {
4313 	struct pool_workqueue *old_pwq;
4314 
4315 	lockdep_assert_held(&wq_pool_mutex);
4316 	lockdep_assert_held(&wq->mutex);
4317 
4318 	/* link_pwq() can handle duplicate calls */
4319 	link_pwq(pwq);
4320 
4321 	old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4322 	rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
4323 	return old_pwq;
4324 }
4325 
4326 /* context to store the prepared attrs & pwqs before applying */
4327 struct apply_wqattrs_ctx {
4328 	struct workqueue_struct	*wq;		/* target workqueue */
4329 	struct workqueue_attrs	*attrs;		/* attrs to apply */
4330 	struct list_head	list;		/* queued for batching commit */
4331 	struct pool_workqueue	*dfl_pwq;
4332 	struct pool_workqueue	*pwq_tbl[];
4333 };
4334 
4335 /* free the resources after success or abort */
4336 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
4337 {
4338 	if (ctx) {
4339 		int cpu;
4340 
4341 		for_each_possible_cpu(cpu)
4342 			put_pwq_unlocked(ctx->pwq_tbl[cpu]);
4343 		put_pwq_unlocked(ctx->dfl_pwq);
4344 
4345 		free_workqueue_attrs(ctx->attrs);
4346 
4347 		kfree(ctx);
4348 	}
4349 }
4350 
4351 /* allocate the attrs and pwqs for later installation */
4352 static struct apply_wqattrs_ctx *
4353 apply_wqattrs_prepare(struct workqueue_struct *wq,
4354 		      const struct workqueue_attrs *attrs,
4355 		      const cpumask_var_t unbound_cpumask)
4356 {
4357 	struct apply_wqattrs_ctx *ctx;
4358 	struct workqueue_attrs *new_attrs;
4359 	int cpu;
4360 
4361 	lockdep_assert_held(&wq_pool_mutex);
4362 
4363 	if (WARN_ON(attrs->affn_scope < 0 ||
4364 		    attrs->affn_scope >= WQ_AFFN_NR_TYPES))
4365 		return ERR_PTR(-EINVAL);
4366 
4367 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
4368 
4369 	new_attrs = alloc_workqueue_attrs();
4370 	if (!ctx || !new_attrs)
4371 		goto out_free;
4372 
4373 	/*
4374 	 * If something goes wrong during CPU up/down, we'll fall back to
4375 	 * the default pwq covering whole @attrs->cpumask.  Always create
4376 	 * it even if we don't use it immediately.
4377 	 */
4378 	copy_workqueue_attrs(new_attrs, attrs);
4379 	wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
4380 	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4381 	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4382 	if (!ctx->dfl_pwq)
4383 		goto out_free;
4384 
4385 	for_each_possible_cpu(cpu) {
4386 		if (new_attrs->ordered) {
4387 			ctx->dfl_pwq->refcnt++;
4388 			ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
4389 		} else {
4390 			wq_calc_pod_cpumask(new_attrs, cpu, -1);
4391 			ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
4392 			if (!ctx->pwq_tbl[cpu])
4393 				goto out_free;
4394 		}
4395 	}
4396 
4397 	/* save the user configured attrs and sanitize it. */
4398 	copy_workqueue_attrs(new_attrs, attrs);
4399 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4400 	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4401 	ctx->attrs = new_attrs;
4402 
4403 	ctx->wq = wq;
4404 	return ctx;
4405 
4406 out_free:
4407 	free_workqueue_attrs(new_attrs);
4408 	apply_wqattrs_cleanup(ctx);
4409 	return ERR_PTR(-ENOMEM);
4410 }
4411 
4412 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4413 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4414 {
4415 	int cpu;
4416 
4417 	/* all pwqs have been created successfully, let's install'em */
4418 	mutex_lock(&ctx->wq->mutex);
4419 
4420 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4421 
4422 	/* save the previous pwq and install the new one */
4423 	for_each_possible_cpu(cpu)
4424 		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
4425 							ctx->pwq_tbl[cpu]);
4426 
4427 	/* @dfl_pwq might not have been used, ensure it's linked */
4428 	link_pwq(ctx->dfl_pwq);
4429 	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4430 
4431 	mutex_unlock(&ctx->wq->mutex);
4432 }
4433 
4434 static void apply_wqattrs_lock(void)
4435 {
4436 	/* CPUs should stay stable across pwq creations and installations */
4437 	cpus_read_lock();
4438 	mutex_lock(&wq_pool_mutex);
4439 }
4440 
4441 static void apply_wqattrs_unlock(void)
4442 {
4443 	mutex_unlock(&wq_pool_mutex);
4444 	cpus_read_unlock();
4445 }
4446 
4447 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4448 					const struct workqueue_attrs *attrs)
4449 {
4450 	struct apply_wqattrs_ctx *ctx;
4451 
4452 	/* only unbound workqueues can change attributes */
4453 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4454 		return -EINVAL;
4455 
4456 	/* creating multiple pwqs breaks ordering guarantee */
4457 	if (!list_empty(&wq->pwqs)) {
4458 		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4459 			return -EINVAL;
4460 
4461 		wq->flags &= ~__WQ_ORDERED;
4462 	}
4463 
4464 	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
4465 	if (IS_ERR(ctx))
4466 		return PTR_ERR(ctx);
4467 
4468 	/* the ctx has been prepared successfully, let's commit it */
4469 	apply_wqattrs_commit(ctx);
4470 	apply_wqattrs_cleanup(ctx);
4471 
4472 	return 0;
4473 }
4474 
4475 /**
4476  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4477  * @wq: the target workqueue
4478  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4479  *
4480  * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
4481  * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
4482  * work items are affine to the pod they were issued on. Older pwqs are released as
4483  * in-flight work items finish. Note that a work item which repeatedly requeues
4484  * itself back-to-back will stay on its current pwq.
4485  *
4486  * Performs GFP_KERNEL allocations.
4487  *
4488  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4489  *
4490  * Return: 0 on success and -errno on failure.
4491  */
4492 int apply_workqueue_attrs(struct workqueue_struct *wq,
4493 			  const struct workqueue_attrs *attrs)
4494 {
4495 	int ret;
4496 
4497 	lockdep_assert_cpus_held();
4498 
4499 	mutex_lock(&wq_pool_mutex);
4500 	ret = apply_workqueue_attrs_locked(wq, attrs);
4501 	mutex_unlock(&wq_pool_mutex);
4502 
4503 	return ret;
4504 }
4505 EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
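
/*
 * Example (illustrative sketch, hypothetical foo_* names): restrict an
 * unbound workqueue to a caller-provided cpumask with a raised nice level.
 * The caller must hold CPU hotplug read exclusion as documented above;
 * error handling is abbreviated.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *	int ret;
 *
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, foo_allowed_cpus);
 *	ret = apply_workqueue_attrs(foo_unbound_wq, attrs);
 *	free_workqueue_attrs(attrs);
 */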
4506 
4507 /**
4508  * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
4509  * @wq: the target workqueue
4510  * @cpu: the CPU to update pool association for
4511  * @hotplug_cpu: the CPU coming up or going down
4512  * @online: whether @cpu is coming up or going down
4513  *
4514  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4515  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update pod affinity of
4516  * @wq accordingly.
4517  *
4518  *
4519  * If pod affinity can't be adjusted due to memory allocation failure, it falls
4520  * back to @wq->dfl_pwq which may not be optimal but is always correct.
4521  *
4522  * Note that when the last allowed CPU of a pod goes offline for a workqueue
4523  * with a cpumask spanning multiple pods, the workers which were already
4524  * executing the work items for the workqueue will lose their CPU affinity and
4525  * may execute on any CPU. This is similar to how per-cpu workqueues behave on
4526  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
4527  * responsibility to flush the work item from CPU_DOWN_PREPARE.
4528  */
4529 static void wq_update_pod(struct workqueue_struct *wq, int cpu,
4530 			  int hotplug_cpu, bool online)
4531 {
4532 	int off_cpu = online ? -1 : hotplug_cpu;
4533 	struct pool_workqueue *old_pwq = NULL, *pwq;
4534 	struct workqueue_attrs *target_attrs;
4535 
4536 	lockdep_assert_held(&wq_pool_mutex);
4537 
4538 	if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
4539 		return;
4540 
4541 	/*
4542 	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4543 	 * Let's use a preallocated one.  The following buf is protected by
4544 	 * CPU hotplug exclusion.
4545 	 */
4546 	target_attrs = wq_update_pod_attrs_buf;
4547 
4548 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4549 	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
4550 
4551 	/* nothing to do if the target cpumask matches the current pwq */
4552 	wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
4553 	pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
4554 					lockdep_is_held(&wq_pool_mutex));
4555 	if (wqattrs_equal(target_attrs, pwq->pool->attrs))
4556 		return;
4557 
4558 	/* create a new pwq */
4559 	pwq = alloc_unbound_pwq(wq, target_attrs);
4560 	if (!pwq) {
4561 		pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
4562 			wq->name);
4563 		goto use_dfl_pwq;
4564 	}
4565 
4566 	/* Install the new pwq. */
4567 	mutex_lock(&wq->mutex);
4568 	old_pwq = install_unbound_pwq(wq, cpu, pwq);
4569 	goto out_unlock;
4570 
4571 use_dfl_pwq:
4572 	mutex_lock(&wq->mutex);
4573 	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4574 	get_pwq(wq->dfl_pwq);
4575 	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4576 	old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
4577 out_unlock:
4578 	mutex_unlock(&wq->mutex);
4579 	put_pwq_unlocked(old_pwq);
4580 }
4581 
4582 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4583 {
4584 	bool highpri = wq->flags & WQ_HIGHPRI;
4585 	int cpu, ret;
4586 	bool skip = false;
4587 
4588 	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
4589 	if (!wq->cpu_pwq)
4590 		goto enomem;
4591 
4592 	if (!(wq->flags & WQ_UNBOUND)) {
4593 		for_each_possible_cpu(cpu) {
4594 			struct pool_workqueue **pwq_p =
4595 				per_cpu_ptr(wq->cpu_pwq, cpu);
4596 			struct worker_pool *pool =
4597 				&(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
4598 
4599 			*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
4600 						       pool->node);
4601 			if (!*pwq_p)
4602 				goto enomem;
4603 
4604 			init_pwq(*pwq_p, wq, pool);
4605 
4606 			mutex_lock(&wq->mutex);
4607 			link_pwq(*pwq_p);
4608 			mutex_unlock(&wq->mutex);
4609 		}
4610 		return 0;
4611 	}
4612 
4613 	trace_android_rvh_alloc_and_link_pwqs(wq, &ret, &skip);
4614 	if (skip)
4615 		goto oem_skip;
4616 
4617 	cpus_read_lock();
4618 	if (wq->flags & __WQ_ORDERED) {
4619 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4620 		/* there should only be single pwq for ordering guarantee */
4621 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4622 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4623 		     "ordering guarantee broken for workqueue %s\n", wq->name);
4624 	} else {
4625 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4626 	}
4627 	cpus_read_unlock();
4628 
4629 oem_skip:
4630 	/* For unbound pwqs, flushing pwq_release_worker ensures that the
4631 	 * pwq_release_workfn() completes before calling kfree(wq).
4632 	 */
4633 	if (ret)
4634 		kthread_flush_worker(pwq_release_worker);
4635 
4636 	return ret;
4637 
4638 enomem:
4639 	if (wq->cpu_pwq) {
4640 		for_each_possible_cpu(cpu) {
4641 			struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4642 
4643 			if (pwq)
4644 				kmem_cache_free(pwq_cache, pwq);
4645 		}
4646 		free_percpu(wq->cpu_pwq);
4647 		wq->cpu_pwq = NULL;
4648 	}
4649 	return -ENOMEM;
4650 }
4651 
4652 static int wq_clamp_max_active(int max_active, unsigned int flags,
4653 			       const char *name)
4654 {
4655 	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
4656 		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4657 			max_active, name, 1, WQ_MAX_ACTIVE);
4658 
4659 	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
4660 }
4661 
4662 /*
4663  * Workqueues which may be used during memory reclaim should have a rescuer
4664  * to guarantee forward progress.
4665  */
4666 static int init_rescuer(struct workqueue_struct *wq)
4667 {
4668 	struct worker *rescuer;
4669 	int ret;
4670 
4671 	if (!(wq->flags & WQ_MEM_RECLAIM))
4672 		return 0;
4673 
4674 	rescuer = alloc_worker(NUMA_NO_NODE);
4675 	if (!rescuer) {
4676 		pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
4677 		       wq->name);
4678 		return -ENOMEM;
4679 	}
4680 
4681 	rescuer->rescue_wq = wq;
4682 	rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
4683 	if (IS_ERR(rescuer->task)) {
4684 		ret = PTR_ERR(rescuer->task);
4685 		pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
4686 		       wq->name, ERR_PTR(ret));
4687 		kfree(rescuer);
4688 		return ret;
4689 	}
4690 
4691 	wq->rescuer = rescuer;
4692 	kthread_bind_mask(rescuer->task, cpu_possible_mask);
4693 	wake_up_process(rescuer->task);
4694 
4695 	return 0;
4696 }
4697 
4698 __printf(1, 4)
4699 struct workqueue_struct *alloc_workqueue(const char *fmt,
4700 					 unsigned int flags,
4701 					 int max_active, ...)
4702 {
4703 	va_list args;
4704 	struct workqueue_struct *wq;
4705 	struct pool_workqueue *pwq;
4706 
4707 	/*
4708 	 * Unbound && max_active == 1 used to imply ordered, which is no longer
4709 	 * the case on many machines due to per-pod pools. While
4710 	 * alloc_ordered_workqueue() is the right way to create an ordered
4711 	 * workqueue, keep the previous behavior to avoid subtle breakages.
4712 	 */
4713 	if ((flags & WQ_UNBOUND) && max_active == 1)
4714 		flags |= __WQ_ORDERED;
4715 
4716 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
4717 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4718 		flags |= WQ_UNBOUND;
4719 
4720 	/* allocate wq and format name */
4721 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
4722 	if (!wq)
4723 		return NULL;
4724 
4725 	if (flags & WQ_UNBOUND) {
4726 		wq->unbound_attrs = alloc_workqueue_attrs();
4727 		if (!wq->unbound_attrs)
4728 			goto err_free_wq;
4729 	}
4730 
4731 	va_start(args, max_active);
4732 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4733 	va_end(args);
4734 
4735 	max_active = max_active ?: WQ_DFL_ACTIVE;
4736 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
4737 
4738 	/* init wq */
4739 	wq->flags = flags;
4740 	wq->saved_max_active = max_active;
4741 	mutex_init(&wq->mutex);
4742 	atomic_set(&wq->nr_pwqs_to_flush, 0);
4743 	INIT_LIST_HEAD(&wq->pwqs);
4744 	INIT_LIST_HEAD(&wq->flusher_queue);
4745 	INIT_LIST_HEAD(&wq->flusher_overflow);
4746 	INIT_LIST_HEAD(&wq->maydays);
4747 
4748 	wq_init_lockdep(wq);
4749 	INIT_LIST_HEAD(&wq->list);
4750 
4751 	if (alloc_and_link_pwqs(wq) < 0)
4752 		goto err_unreg_lockdep;
4753 
4754 	if (wq_online && init_rescuer(wq) < 0)
4755 		goto err_destroy;
4756 
4757 	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4758 		goto err_destroy;
4759 
4760 	/*
4761 	 * wq_pool_mutex protects global freeze state and workqueues list.
4762 	 * Grab it, adjust max_active and add the new @wq to workqueues
4763 	 * list.
4764 	 */
4765 	mutex_lock(&wq_pool_mutex);
4766 
4767 	mutex_lock(&wq->mutex);
4768 	for_each_pwq(pwq, wq)
4769 		pwq_adjust_max_active(pwq);
4770 	mutex_unlock(&wq->mutex);
4771 
4772 	list_add_tail_rcu(&wq->list, &workqueues);
4773 
4774 	mutex_unlock(&wq_pool_mutex);
4775 
4776 	return wq;
4777 
4778 err_unreg_lockdep:
4779 	wq_unregister_lockdep(wq);
4780 	wq_free_lockdep(wq);
4781 err_free_wq:
4782 	free_workqueue_attrs(wq->unbound_attrs);
4783 	kfree(wq);
4784 	return NULL;
4785 err_destroy:
4786 	destroy_workqueue(wq);
4787 	return NULL;
4788 }
4789 EXPORT_SYMBOL_GPL(alloc_workqueue);
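/*
 * Illustrative sketch (not part of this file's build): a typical driver-side
 * allocation.  The "mydrv" names are hypothetical and used only as an
 * example.
 *
 *	static struct workqueue_struct *mydrv_wq;
 *
 *	mydrv_wq = alloc_workqueue("mydrv", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!mydrv_wq)
 *		return -ENOMEM;
 *	queue_work(mydrv_wq, &mydrv_work);
 *
 * Passing 0 for max_active selects WQ_DFL_ACTIVE.  Callers that need strict
 * one-at-a-time execution should use alloc_ordered_workqueue() rather than
 * relying on WQ_UNBOUND with max_active == 1.
 */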
4790 
4791 static bool pwq_busy(struct pool_workqueue *pwq)
4792 {
4793 	int i;
4794 
4795 	for (i = 0; i < WORK_NR_COLORS; i++)
4796 		if (pwq->nr_in_flight[i])
4797 			return true;
4798 
4799 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4800 		return true;
4801 	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4802 		return true;
4803 
4804 	return false;
4805 }
4806 
4807 /**
4808  * destroy_workqueue - safely terminate a workqueue
4809  * @wq: target workqueue
4810  *
4811  * Safely destroy a workqueue. All work currently pending will be done first.
4812  */
4813 void destroy_workqueue(struct workqueue_struct *wq)
4814 {
4815 	struct pool_workqueue *pwq;
4816 	int cpu;
4817 
4818 	/*
4819 	 * Remove it from sysfs first so that sanity check failure doesn't
4820 	 * lead to sysfs name conflicts.
4821 	 */
4822 	workqueue_sysfs_unregister(wq);
4823 
4824 	/* mark the workqueue destruction is in progress */
4825 	mutex_lock(&wq->mutex);
4826 	wq->flags |= __WQ_DESTROYING;
4827 	mutex_unlock(&wq->mutex);
4828 
4829 	/* drain it before proceeding with destruction */
4830 	drain_workqueue(wq);
4831 
4832 	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4833 	if (wq->rescuer) {
4834 		struct worker *rescuer = wq->rescuer;
4835 
4836 		/* this prevents new queueing */
4837 		raw_spin_lock_irq(&wq_mayday_lock);
4838 		wq->rescuer = NULL;
4839 		raw_spin_unlock_irq(&wq_mayday_lock);
4840 
4841 		/* rescuer will empty maydays list before exiting */
4842 		kthread_stop(rescuer->task);
4843 		kfree(rescuer);
4844 	}
4845 
4846 	/*
4847 	 * Sanity checks - grab all the locks so that we wait for all
4848 	 * in-flight operations which may do put_pwq().
4849 	 */
4850 	mutex_lock(&wq_pool_mutex);
4851 	mutex_lock(&wq->mutex);
4852 	for_each_pwq(pwq, wq) {
4853 		raw_spin_lock_irq(&pwq->pool->lock);
4854 		if (WARN_ON(pwq_busy(pwq))) {
4855 			pr_warn("%s: %s has the following busy pwq\n",
4856 				__func__, wq->name);
4857 			show_pwq(pwq);
4858 			raw_spin_unlock_irq(&pwq->pool->lock);
4859 			mutex_unlock(&wq->mutex);
4860 			mutex_unlock(&wq_pool_mutex);
4861 			show_one_workqueue(wq);
4862 			return;
4863 		}
4864 		raw_spin_unlock_irq(&pwq->pool->lock);
4865 	}
4866 	mutex_unlock(&wq->mutex);
4867 
4868 	/*
4869 	 * wq list is used to freeze wq, remove from list after
4870 	 * flushing is complete in case freeze races us.
4871 	 */
4872 	list_del_rcu(&wq->list);
4873 	mutex_unlock(&wq_pool_mutex);
4874 
4875 	/*
4876 	 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
4877 	 * to put the base refs. @wq will be auto-destroyed from the last
4878 	 * pwq_put. RCU read lock prevents @wq from going away from under us.
4879 	 */
4880 	rcu_read_lock();
4881 
4882 	for_each_possible_cpu(cpu) {
4883 		pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4884 		RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
4885 		put_pwq_unlocked(pwq);
4886 	}
4887 
4888 	put_pwq_unlocked(wq->dfl_pwq);
4889 	wq->dfl_pwq = NULL;
4890 
4891 	rcu_read_unlock();
4892 }
4893 EXPORT_SYMBOL_GPL(destroy_workqueue);
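/*
 * Illustrative teardown sketch (hypothetical mydrv_* names): callers are
 * expected to stop queueing and cancel their own work items first;
 * destroy_workqueue() then drains whatever is still pending.
 *
 *	cancel_work_sync(&mydrv_work);
 *	cancel_delayed_work_sync(&mydrv_dwork);
 *	destroy_workqueue(mydrv_wq);
 *	mydrv_wq = NULL;
 */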
4894 
4895 /**
4896  * workqueue_set_max_active - adjust max_active of a workqueue
4897  * @wq: target workqueue
4898  * @max_active: new max_active value.
4899  *
4900  * Set max_active of @wq to @max_active.
4901  *
4902  * CONTEXT:
4903  * Don't call from IRQ context.
4904  */
4905 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4906 {
4907 	struct pool_workqueue *pwq;
4908 
4909 	/* disallow meddling with max_active for ordered workqueues */
4910 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4911 		return;
4912 
4913 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4914 
4915 	mutex_lock(&wq->mutex);
4916 
4917 	wq->flags &= ~__WQ_ORDERED;
4918 	wq->saved_max_active = max_active;
4919 
4920 	for_each_pwq(pwq, wq)
4921 		pwq_adjust_max_active(pwq);
4922 
4923 	mutex_unlock(&wq->mutex);
4924 }
4925 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
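/*
 * Illustrative sketch: throttling a previously created workqueue at runtime,
 * e.g. from a module parameter write (mydrv_wq is hypothetical):
 *
 *	workqueue_set_max_active(mydrv_wq, 4);
 *
 * The value is clamped to [1, WQ_MAX_ACTIVE].  The new limit governs how many
 * work items may be active at once; items already executing are not
 * interrupted.  Explicitly ordered workqueues are rejected with a warning.
 */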
4926 
4927 /**
4928  * current_work - retrieve %current task's work struct
4929  *
4930  * Determine if %current task is a workqueue worker and what it's working on.
4931  * Useful to find out the context that the %current task is running in.
4932  *
4933  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4934  */
4935 struct work_struct *current_work(void)
4936 {
4937 	struct worker *worker = current_wq_worker();
4938 
4939 	return worker ? worker->current_work : NULL;
4940 }
4941 EXPORT_SYMBOL(current_work);
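/*
 * Illustrative sketch: a helper which may be called both directly and from
 * its own work item can use current_work() to avoid flushing itself
 * (&mydrv_work is hypothetical):
 *
 *	if (current_work() != &mydrv_work)
 *		flush_work(&mydrv_work);
 */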
4942 
4943 /**
4944  * current_is_workqueue_rescuer - is %current workqueue rescuer?
4945  *
4946  * Determine whether %current is a workqueue rescuer.  Can be used from
4947  * work functions to determine whether it's being run off the rescuer task.
4948  *
4949  * Return: %true if %current is a workqueue rescuer. %false otherwise.
4950  */
4951 bool current_is_workqueue_rescuer(void)
4952 {
4953 	struct worker *worker = current_wq_worker();
4954 
4955 	return worker && worker->rescue_wq;
4956 }
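/*
 * Illustrative sketch (hypothetical mydrv_* names): a work function doing
 * optional, best-effort batching may want to shed that extra load when it is
 * being run by the rescuer under memory pressure:
 *
 *	static void mydrv_work_fn(struct work_struct *work)
 *	{
 *		bool minimal = current_is_workqueue_rescuer();
 *
 *		mydrv_process(work, minimal);
 *	}
 */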
4957 
4958 /**
4959  * workqueue_congested - test whether a workqueue is congested
4960  * @cpu: CPU in question
4961  * @wq: target workqueue
4962  *
4963  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4964  * no synchronization around this function and the test result is
4965  * unreliable and only useful as advisory hints or for debugging.
4966  *
4967  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4968  *
4969  * With the exception of ordered workqueues, all workqueues have per-cpu
4970  * pool_workqueues, each with its own congested state. A workqueue being
4971  * congested on one CPU doesn't mean that the workqueue is congested on any
4972  * other CPUs.
4973  *
4974  * Return:
4975  * %true if congested, %false otherwise.
4976  */
4977 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4978 {
4979 	struct pool_workqueue *pwq;
4980 	bool ret;
4981 
4982 	rcu_read_lock();
4983 	preempt_disable();
4984 
4985 	if (cpu == WORK_CPU_UNBOUND)
4986 		cpu = smp_processor_id();
4987 
4988 	pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4989 	ret = !list_empty(&pwq->inactive_works);
4990 
4991 	preempt_enable();
4992 	rcu_read_unlock();
4993 
4994 	return ret;
4995 }
4996 EXPORT_SYMBOL_GPL(workqueue_congested);
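/*
 * Illustrative sketch: since the result is only advisory, it is mostly useful
 * for shedding optional work, e.g. dropping low-priority statistics updates
 * instead of queueing them (mydrv_* names hypothetical):
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, mydrv_wq))
 *		queue_work(mydrv_wq, &mydrv_stats_work);
 */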
4997 
4998 /**
4999  * work_busy - test whether a work is currently pending or running
5000  * @work: the work to be tested
5001  *
5002  * Test whether @work is currently pending or running.  There is no
5003  * synchronization around this function and the test result is
5004  * unreliable and only useful as advisory hints or for debugging.
5005  *
5006  * Return:
5007  * OR'd bitmask of WORK_BUSY_* bits.
5008  */
5009 unsigned int work_busy(struct work_struct *work)
5010 {
5011 	struct worker_pool *pool;
5012 	unsigned long flags;
5013 	unsigned int ret = 0;
5014 
5015 	if (work_pending(work))
5016 		ret |= WORK_BUSY_PENDING;
5017 
5018 	rcu_read_lock();
5019 	pool = get_work_pool(work);
5020 	if (pool) {
5021 		raw_spin_lock_irqsave(&pool->lock, flags);
5022 		if (find_worker_executing_work(pool, work))
5023 			ret |= WORK_BUSY_RUNNING;
5024 		raw_spin_unlock_irqrestore(&pool->lock, flags);
5025 	}
5026 	rcu_read_unlock();
5027 
5028 	return ret;
5029 }
5030 EXPORT_SYMBOL_GPL(work_busy);
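/*
 * Illustrative sketch: dumping the advisory state of a work item from a
 * debug interface (&mydrv_work is hypothetical):
 *
 *	unsigned int busy = work_busy(&mydrv_work);
 *
 *	pr_info("mydrv work:%s%s\n",
 *		busy & WORK_BUSY_PENDING ? " PENDING" : "",
 *		busy & WORK_BUSY_RUNNING ? " RUNNING" : "");
 */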
5031 
5032 /**
5033  * set_worker_desc - set description for the current work item
5034  * @fmt: printf-style format string
5035  * @...: arguments for the format string
5036  *
5037  * This function can be called by a running work function to describe what
5038  * the work item is about.  If the worker task gets dumped, this
5039  * information will be printed out along with the task dump to help debugging.  The
5040  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
5041  */
5042 void set_worker_desc(const char *fmt, ...)
5043 {
5044 	struct worker *worker = current_wq_worker();
5045 	va_list args;
5046 
5047 	if (worker) {
5048 		va_start(args, fmt);
5049 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
5050 		va_end(args);
5051 	}
5052 }
5053 EXPORT_SYMBOL_GPL(set_worker_desc);
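/*
 * Illustrative sketch (hypothetical mydrv_* names): a work function tagging
 * the worker with the device it is operating on so the information shows up
 * in task dumps:
 *
 *	static void mydrv_work_fn(struct work_struct *work)
 *	{
 *		struct mydrv *drv = container_of(work, struct mydrv, work);
 *
 *		set_worker_desc("mydrv %s", dev_name(drv->dev));
 *		...
 *	}
 */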
5054 
5055 /**
5056  * print_worker_info - print out worker information and description
5057  * @log_lvl: the log level to use when printing
5058  * @task: target task
5059  *
5060  * If @task is a worker and currently executing a work item, print out the
5061  * name of the workqueue being serviced and worker description set with
5062  * set_worker_desc() by the currently executing work item.
5063  *
5064  * This function can be safely called on any task as long as the
5065  * task_struct itself is accessible.  While safe, this function isn't
5066  * synchronized and may print out mixed-up or garbage strings of limited length.
5067  */
5068 void print_worker_info(const char *log_lvl, struct task_struct *task)
5069 {
5070 	work_func_t *fn = NULL;
5071 	char name[WQ_NAME_LEN] = { };
5072 	char desc[WORKER_DESC_LEN] = { };
5073 	struct pool_workqueue *pwq = NULL;
5074 	struct workqueue_struct *wq = NULL;
5075 	struct worker *worker;
5076 
5077 	if (!(task->flags & PF_WQ_WORKER))
5078 		return;
5079 
5080 	/*
5081 	 * This function is called without any synchronization and @task
5082 	 * could be in any state.  Be careful with dereferences.
5083 	 */
5084 	worker = kthread_probe_data(task);
5085 
5086 	/*
5087 	 * Carefully copy the associated workqueue's workfn, name and desc.
5088 	 * Keep the original last '\0' in case the original is garbage.
5089 	 */
5090 	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
5091 	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
5092 	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
5093 	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
5094 	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
5095 
5096 	if (fn || name[0] || desc[0]) {
5097 		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
5098 		if (strcmp(name, desc))
5099 			pr_cont(" (%s)", desc);
5100 		pr_cont("\n");
5101 	}
5102 }
5103 
5104 static void pr_cont_pool_info(struct worker_pool *pool)
5105 {
5106 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
5107 	if (pool->node != NUMA_NO_NODE)
5108 		pr_cont(" node=%d", pool->node);
5109 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
5110 }
5111 
5112 struct pr_cont_work_struct {
5113 	bool comma;
5114 	work_func_t func;
5115 	long ctr;
5116 };
5117 
5118 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
5119 {
5120 	if (!pcwsp->ctr)
5121 		goto out_record;
5122 	if (func == pcwsp->func) {
5123 		pcwsp->ctr++;
5124 		return;
5125 	}
5126 	if (pcwsp->ctr == 1)
5127 		pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
5128 	else
5129 		pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
5130 	pcwsp->ctr = 0;
5131 out_record:
5132 	if ((long)func == -1L)
5133 		return;
5134 	pcwsp->comma = comma;
5135 	pcwsp->func = func;
5136 	pcwsp->ctr = 1;
5137 }
5138 
5139 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
5140 {
5141 	if (work->func == wq_barrier_func) {
5142 		struct wq_barrier *barr;
5143 
5144 		barr = container_of(work, struct wq_barrier, work);
5145 
5146 		pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5147 		pr_cont("%s BAR(%d)", comma ? "," : "",
5148 			task_pid_nr(barr->task));
5149 	} else {
5150 		if (!comma)
5151 			pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5152 		pr_cont_work_flush(comma, work->func, pcwsp);
5153 	}
5154 }
5155 
5156 static void show_pwq(struct pool_workqueue *pwq)
5157 {
5158 	struct pr_cont_work_struct pcws = { .ctr = 0, };
5159 	struct worker_pool *pool = pwq->pool;
5160 	struct work_struct *work;
5161 	struct worker *worker;
5162 	bool has_in_flight = false, has_pending = false;
5163 	int bkt;
5164 
5165 	pr_info("  pwq %d:", pool->id);
5166 	pr_cont_pool_info(pool);
5167 
5168 	pr_cont(" active=%d/%d refcnt=%d%s\n",
5169 		pwq->nr_active, pwq->max_active, pwq->refcnt,
5170 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
5171 
5172 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5173 		if (worker->current_pwq == pwq) {
5174 			has_in_flight = true;
5175 			break;
5176 		}
5177 	}
5178 	if (has_in_flight) {
5179 		bool comma = false;
5180 
5181 		pr_info("    in-flight:");
5182 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5183 			if (worker->current_pwq != pwq)
5184 				continue;
5185 
5186 			pr_cont("%s %d%s:%ps", comma ? "," : "",
5187 				task_pid_nr(worker->task),
5188 				worker->rescue_wq ? "(RESCUER)" : "",
5189 				worker->current_func);
5190 			list_for_each_entry(work, &worker->scheduled, entry)
5191 				pr_cont_work(false, work, &pcws);
5192 			pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5193 			comma = true;
5194 		}
5195 		pr_cont("\n");
5196 	}
5197 
5198 	list_for_each_entry(work, &pool->worklist, entry) {
5199 		if (get_work_pwq(work) == pwq) {
5200 			has_pending = true;
5201 			break;
5202 		}
5203 	}
5204 	if (has_pending) {
5205 		bool comma = false;
5206 
5207 		pr_info("    pending:");
5208 		list_for_each_entry(work, &pool->worklist, entry) {
5209 			if (get_work_pwq(work) != pwq)
5210 				continue;
5211 
5212 			pr_cont_work(comma, work, &pcws);
5213 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5214 		}
5215 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5216 		pr_cont("\n");
5217 	}
5218 
5219 	if (!list_empty(&pwq->inactive_works)) {
5220 		bool comma = false;
5221 
5222 		pr_info("    inactive:");
5223 		list_for_each_entry(work, &pwq->inactive_works, entry) {
5224 			pr_cont_work(comma, work, &pcws);
5225 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5226 		}
5227 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5228 		pr_cont("\n");
5229 	}
5230 }
5231 
5232 /**
5233  * show_one_workqueue - dump state of specified workqueue
5234  * @wq: workqueue whose state will be printed
5235  */
5236 void show_one_workqueue(struct workqueue_struct *wq)
5237 {
5238 	struct pool_workqueue *pwq;
5239 	bool idle = true;
5240 	unsigned long flags;
5241 
5242 	for_each_pwq(pwq, wq) {
5243 		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5244 			idle = false;
5245 			break;
5246 		}
5247 	}
5248 	if (idle) /* Nothing to print for idle workqueue */
5249 		return;
5250 
5251 	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
5252 
5253 	for_each_pwq(pwq, wq) {
5254 		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5255 		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5256 			/*
5257 			 * Defer printing to avoid deadlocks in console
5258 			 * drivers that queue work while holding locks
5259 			 * also taken in their write paths.
5260 			 */
5261 			printk_deferred_enter();
5262 			show_pwq(pwq);
5263 			printk_deferred_exit();
5264 		}
5265 		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
5266 		/*
5267 		 * We could be printing a lot from atomic context, e.g.
5268 		 * sysrq-t -> show_all_workqueues(). Avoid triggering
5269 		 * hard lockup.
5270 		 */
5271 		touch_nmi_watchdog();
5272 	}
5273 
5274 }
5275 
5276 /**
5277  * show_one_worker_pool - dump state of specified worker pool
5278  * @pool: worker pool whose state will be printed
5279  */
5280 static void show_one_worker_pool(struct worker_pool *pool)
5281 {
5282 	struct worker *worker;
5283 	bool first = true;
5284 	unsigned long flags;
5285 	unsigned long hung = 0;
5286 
5287 	raw_spin_lock_irqsave(&pool->lock, flags);
5288 	if (pool->nr_workers == pool->nr_idle)
5289 		goto next_pool;
5290 
5291 	/* How long the first pending work is waiting for a worker. */
5292 	if (!list_empty(&pool->worklist))
5293 		hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5294 
5295 	/*
5296 	 * Defer printing to avoid deadlocks in console drivers that
5297 	 * queue work while holding locks also taken in their write
5298 	 * paths.
5299 	 */
5300 	printk_deferred_enter();
5301 	pr_info("pool %d:", pool->id);
5302 	pr_cont_pool_info(pool);
5303 	pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
5304 	if (pool->manager)
5305 		pr_cont(" manager: %d",
5306 			task_pid_nr(pool->manager->task));
5307 	list_for_each_entry(worker, &pool->idle_list, entry) {
5308 		pr_cont(" %s%d", first ? "idle: " : "",
5309 			task_pid_nr(worker->task));
5310 		first = false;
5311 	}
5312 	pr_cont("\n");
5313 	printk_deferred_exit();
5314 next_pool:
5315 	raw_spin_unlock_irqrestore(&pool->lock, flags);
5316 	/*
5317 	 * We could be printing a lot from atomic context, e.g.
5318 	 * sysrq-t -> show_all_workqueues(). Avoid triggering
5319 	 * hard lockup.
5320 	 */
5321 	touch_nmi_watchdog();
5322 
5323 }
5324 
5325 /**
5326  * show_all_workqueues - dump workqueue state
5327  *
5328  * Called from a sysrq handler and prints out all busy workqueues and pools.
5329  */
5330 void show_all_workqueues(void)
5331 {
5332 	struct workqueue_struct *wq;
5333 	struct worker_pool *pool;
5334 	int pi;
5335 
5336 	rcu_read_lock();
5337 
5338 	pr_info("Showing busy workqueues and worker pools:\n");
5339 
5340 	list_for_each_entry_rcu(wq, &workqueues, list)
5341 		show_one_workqueue(wq);
5342 
5343 	for_each_pool(pool, pi)
5344 		show_one_worker_pool(pool);
5345 
5346 	rcu_read_unlock();
5347 }
5348 
5349 /**
5350  * show_freezable_workqueues - dump freezable workqueue state
5351  *
5352  * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5353  * still busy.
5354  */
5355 void show_freezable_workqueues(void)
5356 {
5357 	struct workqueue_struct *wq;
5358 
5359 	rcu_read_lock();
5360 
5361 	pr_info("Showing freezable workqueues that are still busy:\n");
5362 
5363 	list_for_each_entry_rcu(wq, &workqueues, list) {
5364 		if (!(wq->flags & WQ_FREEZABLE))
5365 			continue;
5366 		show_one_workqueue(wq);
5367 	}
5368 
5369 	rcu_read_unlock();
5370 }
5371 
5372 /* used to show worker information through /proc/PID/{comm,stat,status} */
5373 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5374 {
5375 	int off;
5376 
5377 	/* always show the actual comm */
5378 	off = strscpy(buf, task->comm, size);
5379 	if (off < 0)
5380 		return;
5381 
5382 	/* stabilize PF_WQ_WORKER and worker pool association */
5383 	mutex_lock(&wq_pool_attach_mutex);
5384 
5385 	if (task->flags & PF_WQ_WORKER) {
5386 		struct worker *worker = kthread_data(task);
5387 		struct worker_pool *pool = worker->pool;
5388 
5389 		if (pool) {
5390 			raw_spin_lock_irq(&pool->lock);
5391 			/*
5392 			 * ->desc tracks information (wq name or
5393 			 * set_worker_desc()) for the latest execution.  If
5394 			 * current, prepend '+', otherwise '-'.
5395 			 */
5396 			if (worker->desc[0] != '\0') {
5397 				if (worker->current_work)
5398 					scnprintf(buf + off, size - off, "+%s",
5399 						  worker->desc);
5400 				else
5401 					scnprintf(buf + off, size - off, "-%s",
5402 						  worker->desc);
5403 			}
5404 			raw_spin_unlock_irq(&pool->lock);
5405 		}
5406 	}
5407 
5408 	mutex_unlock(&wq_pool_attach_mutex);
5409 }
5410 EXPORT_SYMBOL_GPL(wq_worker_comm);
5411 
5412 #ifdef CONFIG_SMP
5413 
5414 /*
5415  * CPU hotplug.
5416  *
5417  * There are two challenges in supporting CPU hotplug.  Firstly, there
5418  * are a lot of assumptions on strong associations among work, pwq and
5419  * pool which make migrating pending and scheduled works very
5420  * difficult to implement without impacting hot paths.  Secondly,
5421  * worker pools serve a mix of short, long and very long running works, making
5422  * blocked draining impractical.
5423  *
5424  * This is solved by allowing the pools to be disassociated from the CPU
5425  * running as an unbound one and allowing it to be reattached later if the
5426  * cpu comes back online.
5427  */
5428 
5429 static void unbind_workers(int cpu)
5430 {
5431 	struct worker_pool *pool;
5432 	struct worker *worker;
5433 
5434 	for_each_cpu_worker_pool(pool, cpu) {
5435 		mutex_lock(&wq_pool_attach_mutex);
5436 		raw_spin_lock_irq(&pool->lock);
5437 
5438 		/*
5439 		 * We've blocked all attach/detach operations. Make all workers
5440 		 * unbound and set DISASSOCIATED.  Before this, all workers
5441 		 * must be on the cpu.  After this, they may become diasporas.
5442 		 * And the preemption disabled section in their sched callbacks
5443 		 * are guaranteed to see WORKER_UNBOUND since the code here
5444 		 * is on the same cpu.
5445 		 */
5446 		for_each_pool_worker(worker, pool)
5447 			worker->flags |= WORKER_UNBOUND;
5448 
5449 		pool->flags |= POOL_DISASSOCIATED;
5450 
5451 		/*
5452 		 * The handling of nr_running in sched callbacks are disabled
5453 		 * now.  Zap nr_running.  After this, nr_running stays zero and
5454 		 * need_more_worker() and keep_working() are always true as
5455 		 * long as the worklist is not empty.  This pool now behaves as
5456 		 * an unbound (in terms of concurrency management) pool which
5457 		 * is served by workers tied to the pool.
5458 		 */
5459 		pool->nr_running = 0;
5460 
5461 		/*
5462 		 * With concurrency management just turned off, a busy
5463 		 * worker blocking could lead to lengthy stalls.  Kick off
5464 		 * unbound chain execution of currently pending work items.
5465 		 */
5466 		kick_pool(pool);
5467 
5468 		raw_spin_unlock_irq(&pool->lock);
5469 
5470 		for_each_pool_worker(worker, pool)
5471 			unbind_worker(worker);
5472 
5473 		mutex_unlock(&wq_pool_attach_mutex);
5474 	}
5475 }
5476 
5477 /**
5478  * rebind_workers - rebind all workers of a pool to the associated CPU
5479  * @pool: pool of interest
5480  *
5481  * @pool->cpu is coming online.  Rebind all workers to the CPU.
5482  */
5483 static void rebind_workers(struct worker_pool *pool)
5484 {
5485 	struct worker *worker;
5486 
5487 	lockdep_assert_held(&wq_pool_attach_mutex);
5488 
5489 	/*
5490 	 * Restore CPU affinity of all workers.  As all idle workers should
5491 	 * be on the run-queue of the associated CPU before any local
5492 	 * wake-ups for concurrency management happen, restore CPU affinity
5493 	 * of all workers first and then clear UNBOUND.  As we're called
5494 	 * from CPU_ONLINE, the following shouldn't fail.
5495 	 */
5496 	for_each_pool_worker(worker, pool) {
5497 		kthread_set_per_cpu(worker->task, pool->cpu);
5498 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5499 						  pool_allowed_cpus(pool)) < 0);
5500 	}
5501 
5502 	raw_spin_lock_irq(&pool->lock);
5503 
5504 	pool->flags &= ~POOL_DISASSOCIATED;
5505 
5506 	for_each_pool_worker(worker, pool) {
5507 		unsigned int worker_flags = worker->flags;
5508 
5509 		/*
5510 		 * We want to clear UNBOUND but can't directly call
5511 		 * worker_clr_flags() or adjust nr_running.  Atomically
5512 		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5513 		 * @worker will clear REBOUND using worker_clr_flags() when
5514 		 * it initiates the next execution cycle thus restoring
5515 		 * concurrency management.  Note that when or whether
5516 		 * @worker clears REBOUND doesn't affect correctness.
5517 		 *
5518 		 * WRITE_ONCE() is necessary because @worker->flags may be
5519 		 * tested without holding any lock in
5520 		 * wq_worker_running().  Without it, NOT_RUNNING test may
5521 		 * fail incorrectly leading to premature concurrency
5522 		 * management operations.
5523 		 */
5524 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5525 		worker_flags |= WORKER_REBOUND;
5526 		worker_flags &= ~WORKER_UNBOUND;
5527 		WRITE_ONCE(worker->flags, worker_flags);
5528 	}
5529 
5530 	raw_spin_unlock_irq(&pool->lock);
5531 }
5532 
5533 /**
5534  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5535  * @pool: unbound pool of interest
5536  * @cpu: the CPU which is coming up
5537  *
5538  * An unbound pool may end up with a cpumask which doesn't have any online
5539  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
5540  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
5541  * online CPU before, cpus_allowed of all its workers should be restored.
5542  */
5543 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5544 {
5545 	static cpumask_t cpumask;
5546 	struct worker *worker;
5547 
5548 	lockdep_assert_held(&wq_pool_attach_mutex);
5549 
5550 	/* is @cpu allowed for @pool? */
5551 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5552 		return;
5553 
5554 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5555 
5556 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
5557 	for_each_pool_worker(worker, pool)
5558 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5559 }
5560 
5561 int workqueue_prepare_cpu(unsigned int cpu)
5562 {
5563 	struct worker_pool *pool;
5564 
5565 	for_each_cpu_worker_pool(pool, cpu) {
5566 		if (pool->nr_workers)
5567 			continue;
5568 		if (!create_worker(pool))
5569 			return -ENOMEM;
5570 	}
5571 	return 0;
5572 }
5573 
5574 int workqueue_online_cpu(unsigned int cpu)
5575 {
5576 	struct worker_pool *pool;
5577 	struct workqueue_struct *wq;
5578 	int pi;
5579 
5580 	mutex_lock(&wq_pool_mutex);
5581 
5582 	for_each_pool(pool, pi) {
5583 		mutex_lock(&wq_pool_attach_mutex);
5584 
5585 		if (pool->cpu == cpu)
5586 			rebind_workers(pool);
5587 		else if (pool->cpu < 0)
5588 			restore_unbound_workers_cpumask(pool, cpu);
5589 
5590 		mutex_unlock(&wq_pool_attach_mutex);
5591 	}
5592 
5593 	/* update pod affinity of unbound workqueues */
5594 	list_for_each_entry(wq, &workqueues, list) {
5595 		struct workqueue_attrs *attrs = wq->unbound_attrs;
5596 
5597 		if (attrs) {
5598 			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5599 			int tcpu;
5600 
5601 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
5602 				wq_update_pod(wq, tcpu, cpu, true);
5603 		}
5604 	}
5605 
5606 	mutex_unlock(&wq_pool_mutex);
5607 	return 0;
5608 }
5609 
5610 int workqueue_offline_cpu(unsigned int cpu)
5611 {
5612 	struct workqueue_struct *wq;
5613 
5614 	/* unbinding per-cpu workers should happen on the local CPU */
5615 	if (WARN_ON(cpu != smp_processor_id()))
5616 		return -1;
5617 
5618 	unbind_workers(cpu);
5619 
5620 	/* update pod affinity of unbound workqueues */
5621 	mutex_lock(&wq_pool_mutex);
5622 	list_for_each_entry(wq, &workqueues, list) {
5623 		struct workqueue_attrs *attrs = wq->unbound_attrs;
5624 
5625 		if (attrs) {
5626 			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5627 			int tcpu;
5628 
5629 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
5630 				wq_update_pod(wq, tcpu, cpu, false);
5631 		}
5632 	}
5633 	mutex_unlock(&wq_pool_mutex);
5634 
5635 	return 0;
5636 }
5637 
5638 struct work_for_cpu {
5639 	struct work_struct work;
5640 	long (*fn)(void *);
5641 	void *arg;
5642 	long ret;
5643 };
5644 
5645 static void work_for_cpu_fn(struct work_struct *work)
5646 {
5647 	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5648 
5649 	wfc->ret = wfc->fn(wfc->arg);
5650 }
5651 
5652 /**
5653  * work_on_cpu_key - run a function in thread context on a particular cpu
5654  * @cpu: the cpu to run on
5655  * @fn: the function to run
5656  * @arg: the function arg
5657  * @key: The lock class key for lock debugging purposes
5658  *
5659  * It is up to the caller to ensure that the cpu doesn't go offline.
5660  * The caller must not hold any locks which would prevent @fn from completing.
5661  *
5662  * Return: The value @fn returns.
5663  */
5664 long work_on_cpu_key(int cpu, long (*fn)(void *),
5665 		     void *arg, struct lock_class_key *key)
5666 {
5667 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5668 
5669 	INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
5670 	schedule_work_on(cpu, &wfc.work);
5671 	flush_work(&wfc.work);
5672 	destroy_work_on_stack(&wfc.work);
5673 	return wfc.ret;
5674 }
5675 EXPORT_SYMBOL_GPL(work_on_cpu_key);
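/*
 * Illustrative sketch: most callers use the work_on_cpu() wrapper, which
 * supplies the lock class key.  Running a hypothetical mydrv_read_regs() on a
 * specific CPU could look like:
 *
 *	static long mydrv_read_regs(void *arg)
 *	{
 *		return mydrv_do_read(arg);
 *	}
 *
 *	ret = work_on_cpu(target_cpu, mydrv_read_regs, &req);
 */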
5676 
5677 /**
5678  * work_on_cpu_safe_key - run a function in thread context on a particular cpu
5679  * @cpu: the cpu to run on
5680  * @fn:  the function to run
5681  * @arg: the function argument
5682  * @key: The lock class key for lock debugging purposes
5683  *
5684  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5685  * any locks which would prevent @fn from completing.
5686  *
5687  * Return: The value @fn returns.
5688  */
5689 long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
5690 			  void *arg, struct lock_class_key *key)
5691 {
5692 	long ret = -ENODEV;
5693 
5694 	cpus_read_lock();
5695 	if (cpu_online(cpu))
5696 		ret = work_on_cpu_key(cpu, fn, arg, key);
5697 	cpus_read_unlock();
5698 	return ret;
5699 }
5700 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
5701 #endif /* CONFIG_SMP */
5702 
5703 #ifdef CONFIG_FREEZER
5704 
5705 /**
5706  * freeze_workqueues_begin - begin freezing workqueues
5707  *
5708  * Start freezing workqueues.  After this function returns, all freezable
5709  * workqueues will queue new works to their inactive_works list instead of
5710  * pool->worklist.
5711  *
5712  * CONTEXT:
5713  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5714  */
5715 void freeze_workqueues_begin(void)
5716 {
5717 	struct workqueue_struct *wq;
5718 	struct pool_workqueue *pwq;
5719 
5720 	mutex_lock(&wq_pool_mutex);
5721 
5722 	WARN_ON_ONCE(workqueue_freezing);
5723 	workqueue_freezing = true;
5724 
5725 	list_for_each_entry(wq, &workqueues, list) {
5726 		mutex_lock(&wq->mutex);
5727 		for_each_pwq(pwq, wq)
5728 			pwq_adjust_max_active(pwq);
5729 		mutex_unlock(&wq->mutex);
5730 	}
5731 
5732 	mutex_unlock(&wq_pool_mutex);
5733 }
5734 
5735 /**
5736  * freeze_workqueues_busy - are freezable workqueues still busy?
5737  *
5738  * Check whether freezing is complete.  This function must be called
5739  * between freeze_workqueues_begin() and thaw_workqueues().
5740  *
5741  * CONTEXT:
5742  * Grabs and releases wq_pool_mutex.
5743  *
5744  * Return:
5745  * %true if some freezable workqueues are still busy.  %false if freezing
5746  * is complete.
5747  */
5748 bool freeze_workqueues_busy(void)
5749 {
5750 	bool busy = false;
5751 	struct workqueue_struct *wq;
5752 	struct pool_workqueue *pwq;
5753 
5754 	mutex_lock(&wq_pool_mutex);
5755 
5756 	WARN_ON_ONCE(!workqueue_freezing);
5757 
5758 	list_for_each_entry(wq, &workqueues, list) {
5759 		if (!(wq->flags & WQ_FREEZABLE))
5760 			continue;
5761 		/*
5762 		 * nr_active is monotonically decreasing.  It's safe
5763 		 * to peek without lock.
5764 		 */
5765 		rcu_read_lock();
5766 		for_each_pwq(pwq, wq) {
5767 			WARN_ON_ONCE(pwq->nr_active < 0);
5768 			if (pwq->nr_active) {
5769 				busy = true;
5770 				rcu_read_unlock();
5771 				goto out_unlock;
5772 			}
5773 		}
5774 		rcu_read_unlock();
5775 	}
5776 out_unlock:
5777 	mutex_unlock(&wq_pool_mutex);
5778 	return busy;
5779 }
5780 
5781 /**
5782  * thaw_workqueues - thaw workqueues
5783  *
5784  * Thaw workqueues.  Normal queueing is restored and all collected
5785  * frozen works are transferred to their respective pool worklists.
5786  *
5787  * CONTEXT:
5788  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5789  */
5790 void thaw_workqueues(void)
5791 {
5792 	struct workqueue_struct *wq;
5793 	struct pool_workqueue *pwq;
5794 
5795 	mutex_lock(&wq_pool_mutex);
5796 
5797 	if (!workqueue_freezing)
5798 		goto out_unlock;
5799 
5800 	workqueue_freezing = false;
5801 
5802 	/* restore max_active and repopulate worklist */
5803 	list_for_each_entry(wq, &workqueues, list) {
5804 		mutex_lock(&wq->mutex);
5805 		for_each_pwq(pwq, wq)
5806 			pwq_adjust_max_active(pwq);
5807 		mutex_unlock(&wq->mutex);
5808 	}
5809 
5810 out_unlock:
5811 	mutex_unlock(&wq_pool_mutex);
5812 }
5813 #endif /* CONFIG_FREEZER */
5814 
5815 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
5816 {
5817 	LIST_HEAD(ctxs);
5818 	int ret = 0;
5819 	struct workqueue_struct *wq;
5820 	struct apply_wqattrs_ctx *ctx, *n;
5821 
5822 	lockdep_assert_held(&wq_pool_mutex);
5823 
5824 	list_for_each_entry(wq, &workqueues, list) {
5825 		if (!(wq->flags & WQ_UNBOUND))
5826 			continue;
5827 		/* creating multiple pwqs breaks ordering guarantee */
5828 		if (wq->flags & __WQ_ORDERED)
5829 			continue;
5830 
5831 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
5832 		if (IS_ERR(ctx)) {
5833 			ret = PTR_ERR(ctx);
5834 			break;
5835 		}
5836 
5837 		list_add_tail(&ctx->list, &ctxs);
5838 	}
5839 
5840 	list_for_each_entry_safe(ctx, n, &ctxs, list) {
5841 		if (!ret)
5842 			apply_wqattrs_commit(ctx);
5843 		apply_wqattrs_cleanup(ctx);
5844 	}
5845 
5846 	if (!ret) {
5847 		mutex_lock(&wq_pool_attach_mutex);
5848 		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5849 		mutex_unlock(&wq_pool_attach_mutex);
5850 	}
5851 	return ret;
5852 }
5853 
5854 /**
5855  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5856  *  @cpumask: the cpumask to set
5857  *
5858  *  The low-level workqueues cpumask is a global cpumask that limits
5859  *  the affinity of all unbound workqueues.  This function checks @cpumask
5860  *  and applies it to all unbound workqueues, updating all their pwqs.
5861  *
5862  *  Return:	0	- Success
5863  *  		-EINVAL	- Invalid @cpumask
5864  *  		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
5865  */
5866 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5867 {
5868 	int ret = -EINVAL;
5869 
5870 	/*
5871 	 * Not excluding isolated cpus on purpose.
5872 	 * If the user wishes to include them, we allow that.
5873 	 */
5874 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
5875 	if (!cpumask_empty(cpumask)) {
5876 		apply_wqattrs_lock();
5877 		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5878 			ret = 0;
5879 			goto out_unlock;
5880 		}
5881 
5882 		ret = workqueue_apply_unbound_cpumask(cpumask);
5883 
5884 out_unlock:
5885 		apply_wqattrs_unlock();
5886 	}
5887 
5888 	return ret;
5889 }
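/*
 * Illustrative note: in-kernel callers pass a candidate mask directly, while
 * userspace typically adjusts the same setting through the sysfs file set up
 * by wq_sysfs_init() below, e.g.:
 *
 *	# echo 0-3 > /sys/devices/virtual/workqueue/cpumask
 *
 * Ordered workqueues are skipped by the update so their execution guarantee
 * is preserved.
 */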
5890 
5891 static int parse_affn_scope(const char *val)
5892 {
5893 	int i;
5894 
5895 	for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
5896 		if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
5897 			return i;
5898 	}
5899 	return -EINVAL;
5900 }
5901 
5902 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
5903 {
5904 	struct workqueue_struct *wq;
5905 	int affn, cpu;
5906 
5907 	affn = parse_affn_scope(val);
5908 	if (affn < 0)
5909 		return affn;
5910 	if (affn == WQ_AFFN_DFL)
5911 		return -EINVAL;
5912 
5913 	cpus_read_lock();
5914 	mutex_lock(&wq_pool_mutex);
5915 
5916 	wq_affn_dfl = affn;
5917 
5918 	list_for_each_entry(wq, &workqueues, list) {
5919 		for_each_online_cpu(cpu) {
5920 			wq_update_pod(wq, cpu, cpu, true);
5921 		}
5922 	}
5923 
5924 	mutex_unlock(&wq_pool_mutex);
5925 	cpus_read_unlock();
5926 
5927 	return 0;
5928 }
5929 
5930 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
5931 {
5932 	return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
5933 }
5934 
5935 static const struct kernel_param_ops wq_affn_dfl_ops = {
5936 	.set	= wq_affn_dfl_set,
5937 	.get	= wq_affn_dfl_get,
5938 };
5939 
5940 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
5941 
5942 #ifdef CONFIG_SYSFS
5943 /*
5944  * Workqueues with the WQ_SYSFS flag set are visible to userland via
5945  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5946  * following attributes.
5947  *
5948  *  per_cpu		RO bool	: whether the workqueue is per-cpu or unbound
5949  *  max_active		RW int	: maximum number of in-flight work items
5950  *
5951  * Unbound workqueues have the following extra attributes.
5952  *
5953  *  nice		RW int	: nice value of the workers
5954  *  cpumask		RW mask	: bitmask of allowed CPUs for the workers
5955  *  affinity_scope	RW str  : worker CPU affinity scope (cache, numa, none)
5956  *  affinity_strict	RW bool : worker CPU affinity is strict
5957  */
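/*
 * Illustrative sketch: for an unbound workqueue created with WQ_SYSFS
 * (hypothetical name "mydrv"), the attributes above can be tuned from
 * userspace, e.g.:
 *
 *	# cd /sys/bus/workqueue/devices/mydrv
 *	# echo 4 > max_active
 *	# echo 0-3 > cpumask
 *	# echo cache > affinity_scope
 */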
5958 struct wq_device {
5959 	struct workqueue_struct		*wq;
5960 	struct device			dev;
5961 };
5962 
5963 static struct workqueue_struct *dev_to_wq(struct device *dev)
5964 {
5965 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5966 
5967 	return wq_dev->wq;
5968 }
5969 
5970 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5971 			    char *buf)
5972 {
5973 	struct workqueue_struct *wq = dev_to_wq(dev);
5974 
5975 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5976 }
5977 static DEVICE_ATTR_RO(per_cpu);
5978 
5979 static ssize_t max_active_show(struct device *dev,
5980 			       struct device_attribute *attr, char *buf)
5981 {
5982 	struct workqueue_struct *wq = dev_to_wq(dev);
5983 
5984 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5985 }
5986 
5987 static ssize_t max_active_store(struct device *dev,
5988 				struct device_attribute *attr, const char *buf,
5989 				size_t count)
5990 {
5991 	struct workqueue_struct *wq = dev_to_wq(dev);
5992 	int val;
5993 
5994 	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5995 		return -EINVAL;
5996 
5997 	workqueue_set_max_active(wq, val);
5998 	return count;
5999 }
6000 static DEVICE_ATTR_RW(max_active);
6001 
6002 static struct attribute *wq_sysfs_attrs[] = {
6003 	&dev_attr_per_cpu.attr,
6004 	&dev_attr_max_active.attr,
6005 	NULL,
6006 };
6007 ATTRIBUTE_GROUPS(wq_sysfs);
6008 
6009 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
6010 			    char *buf)
6011 {
6012 	struct workqueue_struct *wq = dev_to_wq(dev);
6013 	int written;
6014 
6015 	mutex_lock(&wq->mutex);
6016 	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
6017 	mutex_unlock(&wq->mutex);
6018 
6019 	return written;
6020 }
6021 
6022 /* prepare workqueue_attrs for sysfs store operations */
6023 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
6024 {
6025 	struct workqueue_attrs *attrs;
6026 
6027 	lockdep_assert_held(&wq_pool_mutex);
6028 
6029 	attrs = alloc_workqueue_attrs();
6030 	if (!attrs)
6031 		return NULL;
6032 
6033 	copy_workqueue_attrs(attrs, wq->unbound_attrs);
6034 	return attrs;
6035 }
6036 
6037 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
6038 			     const char *buf, size_t count)
6039 {
6040 	struct workqueue_struct *wq = dev_to_wq(dev);
6041 	struct workqueue_attrs *attrs;
6042 	int ret = -ENOMEM;
6043 
6044 	apply_wqattrs_lock();
6045 
6046 	attrs = wq_sysfs_prep_attrs(wq);
6047 	if (!attrs)
6048 		goto out_unlock;
6049 
6050 	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
6051 	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
6052 		ret = apply_workqueue_attrs_locked(wq, attrs);
6053 	else
6054 		ret = -EINVAL;
6055 
6056 out_unlock:
6057 	apply_wqattrs_unlock();
6058 	free_workqueue_attrs(attrs);
6059 	return ret ?: count;
6060 }
6061 
6062 static ssize_t wq_cpumask_show(struct device *dev,
6063 			       struct device_attribute *attr, char *buf)
6064 {
6065 	struct workqueue_struct *wq = dev_to_wq(dev);
6066 	int written;
6067 
6068 	mutex_lock(&wq->mutex);
6069 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6070 			    cpumask_pr_args(wq->unbound_attrs->cpumask));
6071 	mutex_unlock(&wq->mutex);
6072 	return written;
6073 }
6074 
6075 static ssize_t wq_cpumask_store(struct device *dev,
6076 				struct device_attribute *attr,
6077 				const char *buf, size_t count)
6078 {
6079 	struct workqueue_struct *wq = dev_to_wq(dev);
6080 	struct workqueue_attrs *attrs;
6081 	int ret = -ENOMEM;
6082 
6083 	apply_wqattrs_lock();
6084 
6085 	attrs = wq_sysfs_prep_attrs(wq);
6086 	if (!attrs)
6087 		goto out_unlock;
6088 
6089 	ret = cpumask_parse(buf, attrs->cpumask);
6090 	if (!ret)
6091 		ret = apply_workqueue_attrs_locked(wq, attrs);
6092 
6093 out_unlock:
6094 	apply_wqattrs_unlock();
6095 	free_workqueue_attrs(attrs);
6096 	return ret ?: count;
6097 }
6098 
6099 static ssize_t wq_affn_scope_show(struct device *dev,
6100 				  struct device_attribute *attr, char *buf)
6101 {
6102 	struct workqueue_struct *wq = dev_to_wq(dev);
6103 	int written;
6104 
6105 	mutex_lock(&wq->mutex);
6106 	if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
6107 		written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
6108 				    wq_affn_names[WQ_AFFN_DFL],
6109 				    wq_affn_names[wq_affn_dfl]);
6110 	else
6111 		written = scnprintf(buf, PAGE_SIZE, "%s\n",
6112 				    wq_affn_names[wq->unbound_attrs->affn_scope]);
6113 	mutex_unlock(&wq->mutex);
6114 
6115 	return written;
6116 }
6117 
6118 static ssize_t wq_affn_scope_store(struct device *dev,
6119 				   struct device_attribute *attr,
6120 				   const char *buf, size_t count)
6121 {
6122 	struct workqueue_struct *wq = dev_to_wq(dev);
6123 	struct workqueue_attrs *attrs;
6124 	int affn, ret = -ENOMEM;
6125 
6126 	affn = parse_affn_scope(buf);
6127 	if (affn < 0)
6128 		return affn;
6129 
6130 	apply_wqattrs_lock();
6131 	attrs = wq_sysfs_prep_attrs(wq);
6132 	if (attrs) {
6133 		attrs->affn_scope = affn;
6134 		ret = apply_workqueue_attrs_locked(wq, attrs);
6135 	}
6136 	apply_wqattrs_unlock();
6137 	free_workqueue_attrs(attrs);
6138 	return ret ?: count;
6139 }
6140 
6141 static ssize_t wq_affinity_strict_show(struct device *dev,
6142 				       struct device_attribute *attr, char *buf)
6143 {
6144 	struct workqueue_struct *wq = dev_to_wq(dev);
6145 
6146 	return scnprintf(buf, PAGE_SIZE, "%d\n",
6147 			 wq->unbound_attrs->affn_strict);
6148 }
6149 
6150 static ssize_t wq_affinity_strict_store(struct device *dev,
6151 					struct device_attribute *attr,
6152 					const char *buf, size_t count)
6153 {
6154 	struct workqueue_struct *wq = dev_to_wq(dev);
6155 	struct workqueue_attrs *attrs;
6156 	int v, ret = -ENOMEM;
6157 
6158 	if (sscanf(buf, "%d", &v) != 1)
6159 		return -EINVAL;
6160 
6161 	apply_wqattrs_lock();
6162 	attrs = wq_sysfs_prep_attrs(wq);
6163 	if (attrs) {
6164 		attrs->affn_strict = (bool)v;
6165 		ret = apply_workqueue_attrs_locked(wq, attrs);
6166 	}
6167 	apply_wqattrs_unlock();
6168 	free_workqueue_attrs(attrs);
6169 	return ret ?: count;
6170 }
6171 
6172 static struct device_attribute wq_sysfs_unbound_attrs[] = {
6173 	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
6174 	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
6175 	__ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
6176 	__ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
6177 	__ATTR_NULL,
6178 };
6179 
6180 static struct bus_type wq_subsys = {
6181 	.name				= "workqueue",
6182 	.dev_groups			= wq_sysfs_groups,
6183 };
6184 
6185 static ssize_t wq_unbound_cpumask_show(struct device *dev,
6186 		struct device_attribute *attr, char *buf)
6187 {
6188 	int written;
6189 
6190 	mutex_lock(&wq_pool_mutex);
6191 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6192 			    cpumask_pr_args(wq_unbound_cpumask));
6193 	mutex_unlock(&wq_pool_mutex);
6194 
6195 	return written;
6196 }
6197 
6198 static ssize_t wq_unbound_cpumask_store(struct device *dev,
6199 		struct device_attribute *attr, const char *buf, size_t count)
6200 {
6201 	cpumask_var_t cpumask;
6202 	int ret;
6203 
6204 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6205 		return -ENOMEM;
6206 
6207 	ret = cpumask_parse(buf, cpumask);
6208 	if (!ret)
6209 		ret = workqueue_set_unbound_cpumask(cpumask);
6210 
6211 	free_cpumask_var(cpumask);
6212 	return ret ? ret : count;
6213 }
6214 
6215 static struct device_attribute wq_sysfs_cpumask_attr =
6216 	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
6217 	       wq_unbound_cpumask_store);
6218 
6219 static int __init wq_sysfs_init(void)
6220 {
6221 	struct device *dev_root;
6222 	int err;
6223 
6224 	err = subsys_virtual_register(&wq_subsys, NULL);
6225 	if (err)
6226 		return err;
6227 
6228 	dev_root = bus_get_dev_root(&wq_subsys);
6229 	if (dev_root) {
6230 		err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
6231 		put_device(dev_root);
6232 	}
6233 	return err;
6234 }
6235 core_initcall(wq_sysfs_init);
6236 
6237 static void wq_device_release(struct device *dev)
6238 {
6239 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6240 
6241 	kfree(wq_dev);
6242 }
6243 
6244 /**
6245  * workqueue_sysfs_register - make a workqueue visible in sysfs
6246  * @wq: the workqueue to register
6247  *
6248  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
6249  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
6250  * which is the preferred method.
6251  *
6252  * Workqueue user should use this function directly iff it wants to apply
6253  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
6254  * apply_workqueue_attrs() may race against userland updating the
6255  * attributes.
6256  *
6257  * Return: 0 on success, -errno on failure.
6258  */
6259 int workqueue_sysfs_register(struct workqueue_struct *wq)
6260 {
6261 	struct wq_device *wq_dev;
6262 	int ret;
6263 
6264 	/*
6265 	 * Adjusting max_active or creating new pwqs by applying
6266 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
6267 	 * workqueues.
6268 	 */
6269 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6270 		return -EINVAL;
6271 
6272 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
6273 	if (!wq_dev)
6274 		return -ENOMEM;
6275 
6276 	wq_dev->wq = wq;
6277 	wq_dev->dev.bus = &wq_subsys;
6278 	wq_dev->dev.release = wq_device_release;
6279 	dev_set_name(&wq_dev->dev, "%s", wq->name);
6280 
6281 	/*
6282 	 * unbound_attrs are created separately.  Suppress uevent until
6283 	 * everything is ready.
6284 	 */
6285 	dev_set_uevent_suppress(&wq_dev->dev, true);
6286 
6287 	ret = device_register(&wq_dev->dev);
6288 	if (ret) {
6289 		put_device(&wq_dev->dev);
6290 		wq->wq_dev = NULL;
6291 		return ret;
6292 	}
6293 
6294 	if (wq->flags & WQ_UNBOUND) {
6295 		struct device_attribute *attr;
6296 
6297 		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
6298 			ret = device_create_file(&wq_dev->dev, attr);
6299 			if (ret) {
6300 				device_unregister(&wq_dev->dev);
6301 				wq->wq_dev = NULL;
6302 				return ret;
6303 			}
6304 		}
6305 	}
6306 
6307 	dev_set_uevent_suppress(&wq_dev->dev, false);
6308 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
6309 	return 0;
6310 }
6311 
6312 /**
6313  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
6314  * @wq: the workqueue to unregister
6315  *
6316  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
6317  */
6318 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
6319 {
6320 	struct wq_device *wq_dev = wq->wq_dev;
6321 
6322 	if (!wq->wq_dev)
6323 		return;
6324 
6325 	wq->wq_dev = NULL;
6326 	device_unregister(&wq_dev->dev);
6327 }
6328 #else	/* CONFIG_SYSFS */
6329 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
6330 #endif	/* CONFIG_SYSFS */
6331 
6332 /*
6333  * Workqueue watchdog.
6334  *
6335  * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
6336  * flush dependency, a concurrency managed work item which stays RUNNING
6337  * indefinitely.  Workqueue stalls can be very difficult to debug as the
6338  * usual warning mechanisms don't trigger and internal workqueue state is
6339  * largely opaque.
6340  *
6341  * Workqueue watchdog monitors all worker pools periodically and dumps
6342  * state if some pools failed to make forward progress for a while where
6343  * forward progress is defined as the first item on ->worklist changing.
6344  *
6345  * This mechanism is controlled through the kernel parameter
6346  * "workqueue.watchdog_thresh" which can be updated at runtime through the
6347  * corresponding sysfs parameter file.
6348  */
6349 #ifdef CONFIG_WQ_WATCHDOG
6350 
6351 static unsigned long wq_watchdog_thresh = 30;
6352 static struct timer_list wq_watchdog_timer;
6353 
6354 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6355 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6356 
6357 static unsigned int wq_panic_on_stall;
6358 module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
6359 
6360 /*
6361  * Show workers that might prevent the processing of pending work items.
6362  * The only candidates are CPU-bound workers in the running state.
6363  * Pending work items should be handled by another idle worker
6364  * in all other situations.
6365  */
6366 static void show_cpu_pool_hog(struct worker_pool *pool)
6367 {
6368 	struct worker *worker;
6369 	unsigned long flags;
6370 	int bkt;
6371 
6372 	raw_spin_lock_irqsave(&pool->lock, flags);
6373 
6374 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6375 		if (task_is_running(worker->task)) {
6376 			/*
6377 			 * Defer printing to avoid deadlocks in console
6378 			 * drivers that queue work while holding locks
6379 			 * also taken in their write paths.
6380 			 */
6381 			printk_deferred_enter();
6382 
6383 			pr_info("pool %d:\n", pool->id);
6384 			sched_show_task(worker->task);
6385 
6386 			printk_deferred_exit();
6387 		}
6388 	}
6389 
6390 	raw_spin_unlock_irqrestore(&pool->lock, flags);
6391 }
6392 
6393 static void show_cpu_pools_hogs(void)
6394 {
6395 	struct worker_pool *pool;
6396 	int pi;
6397 
6398 	pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6399 
6400 	rcu_read_lock();
6401 
6402 	for_each_pool(pool, pi) {
6403 		if (pool->cpu_stall)
6404 			show_cpu_pool_hog(pool);
6405 
6406 	}
6407 
6408 	rcu_read_unlock();
6409 }
6410 
6411 static void panic_on_wq_watchdog(void)
6412 {
6413 	static unsigned int wq_stall;
6414 
6415 	if (wq_panic_on_stall) {
6416 		wq_stall++;
6417 		BUG_ON(wq_stall >= wq_panic_on_stall);
6418 	}
6419 }
6420 
6421 static void wq_watchdog_reset_touched(void)
6422 {
6423 	int cpu;
6424 
6425 	wq_watchdog_touched = jiffies;
6426 	for_each_possible_cpu(cpu)
6427 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6428 }
6429 
6430 static void wq_watchdog_timer_fn(struct timer_list *unused)
6431 {
6432 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6433 	bool lockup_detected = false;
6434 	bool cpu_pool_stall = false;
6435 	unsigned long now = jiffies;
6436 	struct worker_pool *pool;
6437 	int pi;
6438 
6439 	if (!thresh)
6440 		return;
6441 
6442 	rcu_read_lock();
6443 
6444 	for_each_pool(pool, pi) {
6445 		unsigned long pool_ts, touched, ts;
6446 
6447 		pool->cpu_stall = false;
6448 		if (list_empty(&pool->worklist))
6449 			continue;
6450 
6451 		/*
6452 		 * If a virtual machine is stopped by the host it can look to
6453 		 * the watchdog like a stall.
6454 		 */
6455 		kvm_check_and_clear_guest_paused();
6456 
6457 		/* get the latest of pool and touched timestamps */
6458 		if (pool->cpu >= 0)
6459 			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
6460 		else
6461 			touched = READ_ONCE(wq_watchdog_touched);
6462 		pool_ts = READ_ONCE(pool->watchdog_ts);
6463 
6464 		if (time_after(pool_ts, touched))
6465 			ts = pool_ts;
6466 		else
6467 			ts = touched;
6468 
6469 		/* did we stall? */
6470 		if (time_after(now, ts + thresh)) {
6471 			lockup_detected = true;
6472 			if (pool->cpu >= 0) {
6473 				pool->cpu_stall = true;
6474 				cpu_pool_stall = true;
6475 			}
6476 			pr_emerg("BUG: workqueue lockup - pool");
6477 			pr_cont_pool_info(pool);
6478 			pr_cont(" stuck for %us!\n",
6479 				jiffies_to_msecs(now - pool_ts) / 1000);
6480 			trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
6481 		}
6482 
6483 
6484 	}
6485 
6486 	rcu_read_unlock();
6487 
6488 	if (lockup_detected)
6489 		show_all_workqueues();
6490 
6491 	if (cpu_pool_stall)
6492 		show_cpu_pools_hogs();
6493 
6494 	if (lockup_detected)
6495 		panic_on_wq_watchdog();
6496 
6497 	wq_watchdog_reset_touched();
6498 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
6499 }
6500 
6501 notrace void wq_watchdog_touch(int cpu)
6502 {
6503 	if (cpu >= 0)
6504 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6505 
6506 	wq_watchdog_touched = jiffies;
6507 }
6508 
6509 static void wq_watchdog_set_thresh(unsigned long thresh)
6510 {
6511 	wq_watchdog_thresh = 0;
6512 	del_timer_sync(&wq_watchdog_timer);
6513 
6514 	if (thresh) {
6515 		wq_watchdog_thresh = thresh;
6516 		wq_watchdog_reset_touched();
6517 		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
6518 	}
6519 }
6520 
6521 static int wq_watchdog_param_set_thresh(const char *val,
6522 					const struct kernel_param *kp)
6523 {
6524 	unsigned long thresh;
6525 	int ret;
6526 
6527 	ret = kstrtoul(val, 0, &thresh);
6528 	if (ret)
6529 		return ret;
6530 
6531 	if (system_wq)
6532 		wq_watchdog_set_thresh(thresh);
6533 	else
6534 		wq_watchdog_thresh = thresh;
6535 
6536 	return 0;
6537 }
6538 
6539 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
6540 	.set	= wq_watchdog_param_set_thresh,
6541 	.get	= param_get_ulong,
6542 };
6543 
6544 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
6545 		0644);
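/*
 * Usage note (assuming the usual naming for built-in module parameters):
 * the knob above is expected to appear as "workqueue.watchdog_thresh=<seconds>"
 * on the kernel command line and, given the 0644 mode, as a writable file
 * at /sys/module/workqueue/parameters/watchdog_thresh, e.g.
 *
 *	echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 *
 * Writing 0 disables the watchdog via wq_watchdog_set_thresh() above;
 * before system_wq exists, only the raw threshold value is recorded.
 */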
6546 
6547 static void wq_watchdog_init(void)
6548 {
6549 	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
6550 	wq_watchdog_set_thresh(wq_watchdog_thresh);
6551 }
6552 
6553 #else	/* CONFIG_WQ_WATCHDOG */
6554 
6555 static inline void wq_watchdog_init(void) { }
6556 
6557 #endif	/* CONFIG_WQ_WATCHDOG */
6558 
6559 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
6560 {
6561 	if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
6562 		pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
6563 			cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
6564 		return;
6565 	}
6566 
6567 	cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
6568 }
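/*
 * Worked example of the helper above on a hypothetical 8-CPU system:
 * starting from wq_unbound_cpumask = 0-7, restricting with 0-3 leaves
 * 0-3, then restricting with 2-5 leaves 2-3, while a further restriction
 * with 6-7 shares no CPU with the current mask and is therefore warned
 * about and ignored rather than applied.
 */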
6569 
6570 /**
6571  * workqueue_init_early - early init for workqueue subsystem
6572  *
6573  * This is the first step of three-staged workqueue subsystem initialization and
6574  * invoked as soon as the bare basics - memory allocation, cpumasks and idr - are
6575  * up. It sets up all the data structures and system workqueues and allows early
6576  * boot code to create workqueues and queue/cancel work items. Actual work item
6577  * execution starts only after kthreads can be created and scheduled right
6578  * before early initcalls.
6579  */
6580 void __init workqueue_init_early(void)
6581 {
6582 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
6583 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6584 	int i, cpu;
6585 
6586 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6587 
6588 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
6589 	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
6590 	restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
6591 	restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
6592 	if (!cpumask_empty(&wq_cmdline_cpumask))
6593 		restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
6594 
6595 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6596 
6597 	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
6598 	BUG_ON(!wq_update_pod_attrs_buf);
6599 
6600 	/* initialize WQ_AFFN_SYSTEM pods */
6601 	pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6602 	pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
6603 	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6604 	BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
6605 
6606 	BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
6607 
6608 	pt->nr_pods = 1;
6609 	cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
6610 	pt->pod_node[0] = NUMA_NO_NODE;
6611 	pt->cpu_pod[0] = 0;
6612 
6613 	/* initialize CPU pools */
6614 	for_each_possible_cpu(cpu) {
6615 		struct worker_pool *pool;
6616 
6617 		i = 0;
6618 		for_each_cpu_worker_pool(pool, cpu) {
6619 			BUG_ON(init_worker_pool(pool));
6620 			pool->cpu = cpu;
6621 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
6622 			cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
6623 			pool->attrs->nice = std_nice[i++];
6624 			pool->attrs->affn_strict = true;
6625 			pool->node = cpu_to_node(cpu);
6626 
6627 			/* alloc pool ID */
6628 			mutex_lock(&wq_pool_mutex);
6629 			BUG_ON(worker_pool_assign_id(pool));
6630 			mutex_unlock(&wq_pool_mutex);
6631 		}
6632 	}
6633 
6634 	/* create default unbound and ordered wq attrs */
6635 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6636 		struct workqueue_attrs *attrs;
6637 
6638 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
6639 		attrs->nice = std_nice[i];
6640 		unbound_std_wq_attrs[i] = attrs;
6641 
6642 		/*
6643 		 * An ordered wq should have only one pwq as ordering is
6644 		 * guaranteed by max_active which is enforced by pwqs.
6645 		 */
6646 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
6647 		attrs->nice = std_nice[i];
6648 		attrs->ordered = true;
6649 		ordered_wq_attrs[i] = attrs;
6650 	}
6651 
6652 	system_wq = alloc_workqueue("events", 0, 0);
6653 	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6654 	system_long_wq = alloc_workqueue("events_long", 0, 0);
6655 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6656 					    WQ_MAX_ACTIVE);
6657 	system_freezable_wq = alloc_workqueue("events_freezable",
6658 					      WQ_FREEZABLE, 0);
6659 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6660 					      WQ_POWER_EFFICIENT, 0);
6661 	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6662 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6663 					      0);
6664 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6665 	       !system_unbound_wq || !system_freezable_wq ||
6666 	       !system_power_efficient_wq ||
6667 	       !system_freezable_power_efficient_wq);
6668 }
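/*
 * Hedged illustration of the window described above: once
 * workqueue_init_early() has run, boot code may allocate workqueues and
 * queue work, but the callbacks only execute after workqueue_init() has
 * created the kworkers.  The work item and functions below are
 * hypothetical.
 */
#if 0
static void example_early_fn(struct work_struct *work)
{
	pr_info("runs only once workqueue_init() has populated the pools\n");
}
static DECLARE_WORK(example_early_work, example_early_fn);

static void __init example_early_boot_hook(void)
{
	/* queueing is legal this early; execution is simply deferred */
	schedule_work(&example_early_work);
}
#endif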
6669 
6670 static void __init wq_cpu_intensive_thresh_init(void)
6671 {
6672 	unsigned long thresh;
6673 	unsigned long bogo;
6674 
6675 	pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
6676 	BUG_ON(IS_ERR(pwq_release_worker));
6677 
6678 	/* if the user set it to a specific value, keep it */
6679 	if (wq_cpu_intensive_thresh_us != ULONG_MAX)
6680 		return;
6681 
6682 	/*
6683 	 * The default of 10ms is derived from the fact that most modern (as of
6684 	 * 2023) processors can do a lot in 10ms and that it's just below what
6685 	 * most consider human-perceivable. However, the kernel also runs on
6686 	 * much slower CPUs, including microcontrollers, where the threshold
6687 	 * is way too low.
6688 	 *
6689 	 * Let's scale the threshold up to 1 second if BogoMIPS is below 4000.
6690 	 * This is by no means accurate but it doesn't have to be. The mechanism
6691 	 * is still useful even when the threshold is fully scaled up. Also, as
6692 	 * the reports are usually applicable to everyone, having some machines
6693 	 * operate on longer thresholds won't significantly diminish their
6694 	 * usefulness.
6695 	 */
6696 	thresh = 10 * USEC_PER_MSEC;
6697 
6698 	/* see init/calibrate.c for lpj -> BogoMIPS calculation */
6699 	bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
6700 	if (bogo < 4000)
6701 		thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
6702 
6703 	pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
6704 		 loops_per_jiffy, bogo, thresh);
6705 
6706 	wq_cpu_intensive_thresh_us = thresh;
6707 }
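/*
 * Worked example of the scaling above (numbers purely illustrative):
 * at ~1000 BogoMIPS the default becomes min(10000 * 4000 / 1000,
 * USEC_PER_SEC) = 40000us (40ms); at ~40 BogoMIPS the computation hits
 * the 1 second cap; at 4000 BogoMIPS or more the 10ms default is kept.
 */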
6708 
6709 /**
6710  * workqueue_init - bring workqueue subsystem fully online
6711  *
6712  * This is the second step of three-staged workqueue subsystem initialization
6713  * and invoked as soon as kthreads can be created and scheduled. Workqueues have
6714  * been created and work items queued on them, but there are no kworkers
6715  * executing the work items yet. Populate the worker pools with the initial
6716  * workers and enable future kworker creations.
6717  */
6718 void __init workqueue_init(void)
6719 {
6720 	struct workqueue_struct *wq;
6721 	struct worker_pool *pool;
6722 	int cpu, bkt;
6723 
6724 	wq_cpu_intensive_thresh_init();
6725 
6726 	mutex_lock(&wq_pool_mutex);
6727 
6728 	/*
6729 	 * Per-cpu pools created earlier could be missing node hint. Fix them
6730 	 * up. Also, create a rescuer for workqueues that requested it.
6731 	 */
6732 	for_each_possible_cpu(cpu) {
6733 		for_each_cpu_worker_pool(pool, cpu) {
6734 			pool->node = cpu_to_node(cpu);
6735 		}
6736 	}
6737 
6738 	list_for_each_entry(wq, &workqueues, list) {
6739 		WARN(init_rescuer(wq),
6740 		     "workqueue: failed to create early rescuer for %s",
6741 		     wq->name);
6742 	}
6743 
6744 	mutex_unlock(&wq_pool_mutex);
6745 
6746 	/* create the initial workers */
6747 	for_each_online_cpu(cpu) {
6748 		for_each_cpu_worker_pool(pool, cpu) {
6749 			pool->flags &= ~POOL_DISASSOCIATED;
6750 			BUG_ON(!create_worker(pool));
6751 		}
6752 	}
6753 
6754 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6755 		BUG_ON(!create_worker(pool));
6756 
6757 	wq_online = true;
6758 	wq_watchdog_init();
6759 }
6760 
6761 /*
6762  * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
6763  * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6764  * and consecutive pod ID. The rest of @pt is initialized accordingly.
6765  */
6766 static void __init init_pod_type(struct wq_pod_type *pt,
6767 				 bool (*cpus_share_pod)(int, int))
6768 {
6769 	int cur, pre, cpu, pod;
6770 
6771 	pt->nr_pods = 0;
6772 
6773 	/* init @pt->cpu_pod[] according to @cpus_share_pod() */
6774 	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6775 	BUG_ON(!pt->cpu_pod);
6776 
6777 	for_each_possible_cpu(cur) {
6778 		for_each_possible_cpu(pre) {
6779 			if (pre >= cur) {
6780 				pt->cpu_pod[cur] = pt->nr_pods++;
6781 				break;
6782 			}
6783 			if (cpus_share_pod(cur, pre)) {
6784 				pt->cpu_pod[cur] = pt->cpu_pod[pre];
6785 				break;
6786 			}
6787 		}
6788 	}
6789 
6790 	/* init the rest to match @pt->cpu_pod[] */
6791 	pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6792 	pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6793 	BUG_ON(!pt->pod_cpus || !pt->pod_node);
6794 
6795 	for (pod = 0; pod < pt->nr_pods; pod++)
6796 		BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6797 
6798 	for_each_possible_cpu(cpu) {
6799 		cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6800 		pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6801 	}
6802 }
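/*
 * Worked example of the pod ID assignment above, assuming a hypothetical
 * 4-CPU machine where cpus_share_pod() is cpus_share_numa() and CPUs 0-1
 * sit on node 0 while CPUs 2-3 sit on node 1:
 *
 *	cpu:		0  1  2  3
 *	cpu_pod[]:	0  0  1  1	(nr_pods == 2)
 *	pod_cpus[0] = 0-1, pod_node[0] = 0
 *	pod_cpus[1] = 2-3, pod_node[1] = 1
 *
 * Each CPU either joins the pod of an earlier CPU it shares with or, if
 * no earlier CPU matches, opens the next consecutive pod ID.
 */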
6803 
6804 static bool __init cpus_dont_share(int cpu0, int cpu1)
6805 {
6806 	return false;
6807 }
6808 
6809 static bool __init cpus_share_smt(int cpu0, int cpu1)
6810 {
6811 #ifdef CONFIG_SCHED_SMT
6812 	return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
6813 #else
6814 	return false;
6815 #endif
6816 }
6817 
6818 static bool __init cpus_share_numa(int cpu0, int cpu1)
6819 {
6820 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6821 }
6822 
6823 /**
6824  * workqueue_init_topology - initialize CPU pods for unbound workqueues
6825  *
6826  * This is the third step of three-staged workqueue subsystem initialization and
6827  * invoked after SMP and topology information are fully initialized. It
6828  * initializes the unbound CPU pods accordingly.
6829  */
6830 void __init workqueue_init_topology(void)
6831 {
6832 	struct workqueue_struct *wq;
6833 	int cpu;
6834 
6835 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
6836 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
6837 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
6838 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
6839 
6840 	mutex_lock(&wq_pool_mutex);
6841 
6842 	/*
6843 	 * Workqueues allocated earlier would have all CPUs sharing the default
6844 	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
6845 	 * combinations to apply per-pod sharing.
6846 	 */
6847 	list_for_each_entry(wq, &workqueues, list) {
6848 		for_each_online_cpu(cpu) {
6849 			wq_update_pod(wq, cpu, cpu, true);
6850 		}
6851 	}
6852 
6853 	mutex_unlock(&wq_pool_mutex);
6854 }
6855 
6856 void __warn_flushing_systemwide_wq(void)
6857 {
6858 	pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
6859 	dump_stack();
6860 }
6861 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
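/*
 * Hedged illustration: flushing one of the shared system workqueues
 * (e.g. flush_scheduled_work() or flush_workqueue(system_wq)) is the
 * sort of thing the helper above warns about, since it waits on
 * unrelated work items queued by other subsystems.  A driver is expected
 * to flush only its own work, or a workqueue it owns; the struct and
 * field names below are hypothetical.
 */
#if 0
static void example_drv_teardown(struct example_drv *drv)
{
	/* discouraged: waits on every work item queued on system_wq */
	flush_workqueue(system_wq);

	/* preferred: wait only for this driver's own work item ... */
	cancel_work_sync(&drv->work);

	/* ... or flush a workqueue the driver allocated for itself */
	flush_workqueue(drv->wq);
}
#endif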
6862 
6863 static int __init workqueue_unbound_cpus_setup(char *str)
6864 {
6865 	if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
6866 		cpumask_clear(&wq_cmdline_cpumask);
6867 		pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
6868 	}
6869 
6870 	return 1;
6871 }
6872 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
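/*
 * Usage note: the early parameter above takes a cpulist_parse() style
 * list, e.g. "workqueue.unbound_cpus=0-3,8" on the kernel command line.
 * A malformed list is dropped with a warning, and an accepted mask is
 * still intersected with the housekeeping masks by workqueue_init_early()
 * before it takes effect.
 */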
6873