/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on. Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)

#define WORK_STRUCT_FLAG_MASK	((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.
	 * It only modifies how :c:func:`apply_workqueue_attrs` selects pools
	 * and thus doesn't participate in pool hash calculations or equality
	 * comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
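
/*
 * Example (an illustrative sketch, not part of this API): a work function
 * shared with a delayed_work can recover the containing structure via
 * to_delayed_work() plus container_of(). The my_device/poll_work names
 * below are hypothetical.
 *
 *	struct my_device {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev =
 *			container_of(dwork, struct my_device, poll_work);
 *
 *		poll_hardware(dev);
 *	}
 */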

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry = { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
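
/*
 * Example (a minimal sketch; my_work and my_handler are hypothetical
 * names): compile-time declaration of a work item bound to a handler.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("my work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 * The item can later be submitted with schedule_work(&my_work) or
 * queue_work() on a specific workqueue.
 */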

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)	\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants. For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality. Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified. Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded. There are users which expect relatively
 * short queue flush time. Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works. Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue. Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters. For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);
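
/*
 * Example (a minimal sketch; "my_wq" and the error handling style are
 * illustrative only): a driver-private workqueue that may be used
 * during memory reclaim.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 * Passing 0 for @max_active requests the default (WQ_DFL_ACTIVE).
 */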

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue. An ordered workqueue executes at
 * most one work item at any given time in the queued order. It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
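
/*
 * Example (illustrative; the name is hypothetical): an ordered queue
 * for strictly serialized, freezable background work.
 *
 *	struct workqueue_struct *ordered_wq =
 *		alloc_ordered_workqueue("my_ordered", WQ_FREEZABLE);
 */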

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
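
/*
 * Example (a sketch with hypothetical my_obj names): queue_rcu_work()
 * runs the work item after an RCU grace period has elapsed, which is
 * useful for freeing objects whose readers are RCU-protected.
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(to_rcu_work(work), struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */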

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
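
/*
 * Example (an illustrative teardown order; ctx/my_wq are hypothetical):
 * cancel and wait for outstanding items before freeing the data they
 * reference.
 *
 *	cancel_delayed_work_sync(&ctx->retry_work);
 *	cancel_work_sync(&ctx->io_work);
 *	destroy_workqueue(my_wq);
 *	kfree(ctx);
 *
 * The _sync variants may sleep and must not be called from the work
 * item they are cancelling.
 */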

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 *	{ x is initially 0 }
 *
 *	  CPU0				CPU1
 *
 *	WRITE_ONCE(x, 1);		[ @work is being executed ]
 *	r0 = queue_work(wq, work);	  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
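
/*
 * Example (a sketch with hypothetical names): mod_delayed_work() is
 * handy for debouncing; each call pushes the timeout out again, so the
 * work runs once the events stop arriving for @delay jiffies.
 *
 *	static void my_event(struct my_ctx *ctx)
 *	{
 *		mod_delayed_work(system_wq, &ctx->flush_work, HZ / 10);
 *	}
 */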

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
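
/*
 * Example (illustrative; my_dev and my_irq_work are hypothetical):
 * deferring non-urgent processing from an interrupt handler to the
 * kernel-global workqueue.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->my_irq_work);
 *		return IRQ_HANDLED;
 *	}
 */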

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function! It's very easy to get into
 * trouble if you don't take great care. Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often. It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif	/* CONFIG_SMP */
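
/*
 * Example (a minimal sketch; my_fn and the CPU number are illustrative):
 * run a function synchronously in process context on a given CPU and
 * collect its return value. The fn runs from a workqueue worker bound
 * to that CPU.
 *
 *	static long my_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, my_fn, NULL);
 */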

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif	/* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif