/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>
#include <linux/android_kabi.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
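
/*
 * Illustrative sketch (not part of the kernel API): how the off-queue word
 * above decodes. When WORK_STRUCT_PWQ is clear, the pool ID occupies the
 * bits above WORK_OFFQ_POOL_SHIFT and the disable depth sits below it:
 *
 *	unsigned long data = *work_data_bits(work);
 *
 *	if (!(data & WORK_STRUCT_PWQ)) {
 *		unsigned long pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 *		unsigned long depth = (data & WORK_OFFQ_DISABLE_MASK) >>
 *				      WORK_OFFQ_DISABLE_SHIFT;
 *	}
 *
 * A pool_id equal to WORK_OFFQ_POOL_NONE means no associated pool. The
 * real decoding helpers live in kernel/workqueue.c.
 */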

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
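
/*
 * Illustrative sketch (not part of the kernel API): a delayed_work handler
 * receives the embedded work_struct and can recover its container with
 * to_delayed_work(). Assuming a hypothetical handler "my_handler":
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		...
 *	}
 */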

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
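
/*
 * Illustrative sketch (not part of the kernel API): statically declaring
 * work that is ready to queue without a runtime INIT_WORK() call, assuming
 * a hypothetical handler "my_handler":
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 * Such items can be handed straight to schedule_work() or
 * queue_delayed_work().
 */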

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
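
/*
 * Illustrative sketch (not part of the kernel API): runtime initialization
 * of work items embedded in a dynamically allocated object, assuming a
 * hypothetical "struct my_dev" and handler "my_handler":
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		struct delayed_work dwork;
 *	};
 *
 *	INIT_WORK(&dev->work, my_handler);
 *	INIT_DELAYED_WORK(&dev->dwork, my_handler);
 *
 * Use the _ONSTACK variants, paired with destroy_work_on_stack() or
 * destroy_delayed_work_on_stack(), for items living on the stack so that
 * debugobjects can track them.
 */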

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode then leads to
	 * noticeable power savings at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
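
/*
 * Illustrative sketch (not part of the kernel API): allocating and tearing
 * down a dedicated workqueue in a driver, assuming the hypothetical name
 * "mydrv_wq":
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 *
 * A @max_active of 0 selects the default (WQ_DFL_ACTIVE).
 */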

#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
				    1, lockdep_map, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)
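
/*
 * Illustrative sketch (not part of the kernel API): from_work() recovers
 * the containing object inside a callback, assuming a hypothetical "struct
 * my_dev" with an embedded work_struct member named "work":
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = from_work(dev, work, work);
 *
 *		...
 *	}
 */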

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
				const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
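
/*
 * Illustrative sketch (not part of the kernel API): queue_rcu_work() runs
 * the work item after an RCU grace period, a common pattern for deferred
 * freeing. Assuming hypothetical "obj" and "free_handler":
 *
 *	INIT_RCU_WORK(&obj->rwork, free_handler);
 *	queue_rcu_work(system_unbound_wq, &obj->rwork);
 *
 * The handler receives &obj->rwork.work and can recover the rcu_work with
 * to_rcu_work().
 */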

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
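
/*
 * Illustrative sketch (not part of the kernel API): mod_delayed_work() is
 * handy for debouncing - each call restarts the timer, so the handler runs
 * only after events stop arriving for the full delay. Assuming hypothetical
 * "wq" and "dev" and a 100ms window, on every event do:
 *
 *	mod_delayed_work(wq, &dev->dwork, msecs_to_jiffies(100));
 */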

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}
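
/*
 * Illustrative sketch (not part of the kernel API): disable_work_sync()
 * increments the disable depth and waits for any in-flight execution, so a
 * typical reconfigure-and-restart sequence on a hypothetical "dev" is:
 *
 *	disable_work_sync(&dev->work);
 *	... reconfigure dev, no executions can race with this ...
 *	enable_and_queue_work(wq, &dev->work);
 *
 * Disables nest: @work stays disabled until enable_work() has been called
 * once per preceding disable.
 */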

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible. Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
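
/*
 * Illustrative sketch (not part of the kernel API): work_on_cpu() runs a
 * function synchronously on a given CPU and returns its value. Assuming a
 * hypothetical "probe_fn":
 *
 *	static long probe_fn(void *arg)
 *	{
 *		return 0;	<- runs on the target CPU
 *	}
 *
 *	long ret = work_on_cpu(2, probe_fn, NULL);
 *
 * The caller sleeps until the function completes; prefer work_on_cpu_safe()
 * when the target CPU may be going through hotplug.
 */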

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif