/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 16 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)

#define WORK_STRUCT_FLAG_MASK	((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};
enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs.  A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality.  Always a
	 * subset of ->cpumask.  A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's.  Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;
	/*
	 * The fields below aren't properties of a worker_pool.  They only
	 * modify how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * don't participate in pool hash calculations or equality comparisons.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items.  There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type.  CPUs
	 * that belong to the same pod share the worker pool.  For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};
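
/*
 * A minimal sketch of tuning an unbound workqueue through its attributes
 * (the workqueue and values below are hypothetical, and
 * apply_workqueue_attrs() only operates on unbound workqueues):
 *
 *	struct workqueue_attrs *attrs;
 *	int ret = -ENOMEM;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of_node(0));
 *		ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */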

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry = { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
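
/*
 * A minimal sketch of a compile-time work item definition (names
 * hypothetical).  DECLARE_WORK() yields a fully initialized object, so
 * no INIT_WORK() call is needed before queueing:
 *
 *	static void flush_cache_fn(struct work_struct *work)
 *	{
 *		...
 *	}
 *	static DECLARE_WORK(flush_cache_work, flush_cache_fn);
 *
 *	schedule_work(&flush_cache_work);
 */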

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
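
/*
 * A minimal sketch of run-time initialization, e.g. for a work item
 * embedded in a dynamically allocated structure (names hypothetical):
 *
 *	struct my_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	queue_delayed_work(system_wq, &dev->poll_work, HZ);
 *
 * Work items living on the stack must use the _ONSTACK variants and be
 * destroyed with destroy_[delayed_]work_on_stack() before the stack
 * frame is left.
 */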

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active.  Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE).  For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU.  e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system.  e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE).  This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
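
/*
 * A minimal allocation/teardown sketch (name and flags hypothetical);
 * destroy_workqueue() drains the workqueue before freeing it:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_driver_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */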

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
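
/*
 * For example, a strictly serialized event handler might use (names
 * hypothetical):
 *
 *	ev_wq = alloc_ordered_workqueue("my_events", WQ_MEM_RECLAIM);
 *
 * Work items queued on ev_wq never run concurrently with one another and
 * execute in queueing order, which removes the need for a separate lock
 * serializing the handlers.
 */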

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
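
/*
 * A minimal queue_rcu_work() sketch (names hypothetical): the work item
 * runs only after a full RCU grace period has elapsed, which suits
 * freeing objects that RCU readers may still be referencing:
 *
 *	struct my_obj {
 *		struct rcu_work free_rwork;
 *	};
 *
 *	static void my_obj_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_rwork, my_obj_free_fn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */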

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
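
/*
 * A typical usage sketch (hypothetical driver code): embed the
 * work_struct in a larger object and recover the object with
 * container_of() in the handler:
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *	};
 *
 *	static void my_irq_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  irq_work);
 *		...
 *	}
 *
 *	queue_work(my_wq, &dev->irq_work);
 */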

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
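
/*
 * mod_delayed_work() is handy for debouncing: every call pushes the
 * deadline back, so the handler runs once, @delay after the last event
 * (a sketch with hypothetical names):
 *
 *	void my_input_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->debounce_work,
 *				 msecs_to_jiffies(50));
 *	}
 *
 * At teardown, pair this with cancel_delayed_work_sync(&dev->debounce_work)
 * so the handler is neither pending nor running afterwards.
 */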

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible and warn about such attempts at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})
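
/*
 * Rather than flushing a system-wide workqueue, queue the work items on
 * a workqueue of your own and flush that one (a sketch, names
 * hypothetical); it only waits for your own work items:
 *
 *	my_wq = alloc_workqueue("my_wq", 0, 0);
 *	...
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);
 */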

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
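
/*
 * A minimal work_on_cpu() sketch (names hypothetical): run a function
 * synchronously on a specific CPU and collect its return value; the
 * caller blocks until the function has finished:
 *
 *	static long read_state_fn(void *arg)
 *	{
 *		return my_read_cpu_state(arg);
 *	}
 *
 *	ret = work_on_cpu(2, read_state_fn, &ctx);
 *
 * Prefer work_on_cpu_safe() when the target CPU may be going offline; it
 * holds off CPU hotplug while the function runs.
 */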

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif	/* _LINUX_WORKQUEUE_H */