1 /*
2 * kernel/workqueue.c - generic async execution with shared worker pool
3 *
4 * Copyright (C) 2002 Ingo Molnar
5 *
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
11 *
12 * Made to use alloc_percpu by Christoph Lameter.
13 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There are two worker pools for each CPU (one for
20 * normal work items and the other for high priority ones) and some extra
21 * pools for workqueues which are not bound to any specific CPU - the
22 * number of these backing pools is dynamic.
23 *
24 * Please read Documentation/workqueue.txt for details.
25 */
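
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * my_work/my_work_fn names are hypothetical). A work item is
 * initialized once and then queued for asynchronous execution in
 * process context on the shared pools described above:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	// queued on system_wq
 */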
26
27 #include <linux/export.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
30 #include <linux/init.h>
31 #include <linux/signal.h>
32 #include <linux/completion.h>
33 #include <linux/workqueue.h>
34 #include <linux/slab.h>
35 #include <linux/cpu.h>
36 #include <linux/notifier.h>
37 #include <linux/kthread.h>
38 #include <linux/hardirq.h>
39 #include <linux/mempolicy.h>
40 #include <linux/freezer.h>
41 #include <linux/kallsyms.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51
52 #include "workqueue_internal.h"
53
54 enum {
55 /*
56 * worker_pool flags
57 *
58 * A bound pool is either associated or disassociated with its CPU.
59 * While associated (!DISASSOCIATED), all workers are bound to the
60 * CPU and none has %WORKER_UNBOUND set and concurrency management
61 * is in effect.
62 *
63 * While DISASSOCIATED, the cpu may be offline and all workers have
64 * %WORKER_UNBOUND set and concurrency management disabled, and may
65 * be executing on any CPU. The pool behaves as an unbound one.
66 *
67 * Note that DISASSOCIATED should be flipped only while holding
68 * attach_mutex to avoid changing binding state while
69 * worker_attach_to_pool() is in progress.
70 */
71 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
72
73 /* worker flags */
74 WORKER_DIE = 1 << 1, /* die die die */
75 WORKER_IDLE = 1 << 2, /* is idle */
76 WORKER_PREP = 1 << 3, /* preparing to run works */
77 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
78 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
79 WORKER_REBOUND = 1 << 8, /* worker was rebound */
80
81 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
82 WORKER_UNBOUND | WORKER_REBOUND,
83
84 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
85
86 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
87 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
88
89 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
90 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
91
92 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
93 /* call for help after 10ms
94 (min two ticks) */
95 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
96 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
97
98 /*
99 * Rescue workers are used only on emergencies and shared by
100 * all cpus. Give MIN_NICE.
101 */
102 RESCUER_NICE_LEVEL = MIN_NICE,
103 HIGHPRI_NICE_LEVEL = MIN_NICE,
104
105 WQ_NAME_LEN = 24,
106 };
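
/*
 * Example: with HZ == 1000, MAYDAY_INITIAL_TIMEOUT above evaluates to
 * HZ / 100 == 10 ticks, i.e. 10ms. With HZ == 100 the quotient would
 * be a single tick, so the constant falls back to the two-tick minimum.
 */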
107
108 /*
109 * Structure fields follow one of the following exclusion rules.
110 *
111 * I: Modifiable by initialization/destruction paths and read-only for
112 * everyone else.
113 *
114 * P: Preemption protected. Disabling preemption is enough and should
115 * only be modified and accessed from the local cpu.
116 *
117 * L: pool->lock protected. Access with pool->lock held.
118 *
119 * X: During normal operation, modification requires pool->lock and should
120 * be done only from local cpu. Either disabling preemption on local
121 * cpu or grabbing pool->lock is enough for read access. If
122 * POOL_DISASSOCIATED is set, it's identical to L.
123 *
124 * A: pool->attach_mutex protected.
125 *
126 * PL: wq_pool_mutex protected.
127 *
128 * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
129 *
130 * WQ: wq->mutex protected.
131 *
132 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
133 *
134 * MD: wq_mayday_lock protected.
135 */
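
/*
 * Illustrative sketch of the PR rule above (not part of this file):
 * data marked PR may be read under sched-RCU instead of
 * wq_pool_mutex, e.g. looking up a pool in worker_pool_idr (defined
 * further below):
 *
 *	rcu_read_lock_sched();
 *	pool = idr_find(&worker_pool_idr, pool_id);
 *	if (pool)
 *		pr_info("pool %d for cpu %d\n", pool->id, pool->cpu);
 *	rcu_read_unlock_sched();
 */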
136
137 /* struct worker is defined in workqueue_internal.h */
138
139 struct worker_pool {
140 spinlock_t lock; /* the pool lock */
141 int cpu; /* I: the associated cpu */
142 int node; /* I: the associated node ID */
143 int id; /* I: pool ID */
144 unsigned int flags; /* X: flags */
145
146 struct list_head worklist; /* L: list of pending works */
147 int nr_workers; /* L: total number of workers */
148
149 /* nr_idle includes the ones off idle_list for rebinding */
150 int nr_idle; /* L: currently idle ones */
151
152 struct list_head idle_list; /* X: list of idle workers */
153 struct timer_list idle_timer; /* L: worker idle timeout */
154 struct timer_list mayday_timer; /* L: SOS timer for workers */
155
156 /* a worker is either on busy_hash or idle_list, or the manager */
157 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
158 /* L: hash of busy workers */
159
160 /* see manage_workers() for details on the two manager mutexes */
161 struct mutex manager_arb; /* manager arbitration */
162 struct mutex attach_mutex; /* attach/detach exclusion */
163 struct list_head workers; /* A: attached workers */
164 struct completion *detach_completion; /* all workers detached */
165
166 struct ida worker_ida; /* worker IDs for task name */
167
168 struct workqueue_attrs *attrs; /* I: worker attributes */
169 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
170 int refcnt; /* PL: refcnt for unbound pools */
171
172 /*
173 * The current concurrency level. As it's likely to be accessed
174 * from other CPUs during try_to_wake_up(), put it in a separate
175 * cacheline.
176 */
177 atomic_t nr_running ____cacheline_aligned_in_smp;
178
179 /*
180 * Destruction of pool is sched-RCU protected to allow dereferences
181 * from get_work_pool().
182 */
183 struct rcu_head rcu;
184 } ____cacheline_aligned_in_smp;
185
186 /*
187 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
188 * of work_struct->data are used for flags and the remaining high bits
189 * point to the pwq; thus, pwqs need to be aligned to
190 * 1 << WORK_STRUCT_FLAG_BITS (two raised to the number of flag bits).
191 */
192 struct pool_workqueue {
193 struct worker_pool *pool; /* I: the associated pool */
194 struct workqueue_struct *wq; /* I: the owning workqueue */
195 int work_color; /* L: current color */
196 int flush_color; /* L: flushing color */
197 int refcnt; /* L: reference count */
198 int nr_in_flight[WORK_NR_COLORS];
199 /* L: nr of in_flight works */
200 int nr_active; /* L: nr of active works */
201 int max_active; /* L: max active works */
202 struct list_head delayed_works; /* L: delayed works */
203 struct list_head pwqs_node; /* WR: node on wq->pwqs */
204 struct list_head mayday_node; /* MD: node on wq->maydays */
205
206 /*
207 * Release of unbound pwq is punted to system_wq. See put_pwq()
208 * and pwq_unbound_release_workfn() for details. pool_workqueue
209 * itself is also sched-RCU protected so that the first pwq can be
210 * determined without grabbing wq->mutex.
211 */
212 struct work_struct unbound_release_work;
213 struct rcu_head rcu;
214 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
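
/*
 * Example of the encoding described above: because each pwq is
 * aligned to 1 << WORK_STRUCT_FLAG_BITS, its low flag bits are always
 * zero and one word can carry both the pointer and the flags:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
 *	pwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * which is what set_work_pwq() and get_work_pwq() below do.
 */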
215
216 /*
217 * Structure used to wait for workqueue flush.
218 */
219 struct wq_flusher {
220 struct list_head list; /* WQ: list of flushers */
221 int flush_color; /* WQ: flush color waiting for */
222 struct completion done; /* flush completion */
223 };
224
225 struct wq_device;
226
227 /*
228 * The externally visible workqueue. It relays the issued work items to
229 * the appropriate worker_pool through its pool_workqueues.
230 */
231 struct workqueue_struct {
232 struct list_head pwqs; /* WR: all pwqs of this wq */
233 struct list_head list; /* PL: list of all workqueues */
234
235 struct mutex mutex; /* protects this wq */
236 int work_color; /* WQ: current work color */
237 int flush_color; /* WQ: current flush color */
238 atomic_t nr_pwqs_to_flush; /* flush in progress */
239 struct wq_flusher *first_flusher; /* WQ: first flusher */
240 struct list_head flusher_queue; /* WQ: flush waiters */
241 struct list_head flusher_overflow; /* WQ: flush overflow list */
242
243 struct list_head maydays; /* MD: pwqs requesting rescue */
244 struct worker *rescuer; /* I: rescue worker */
245
246 int nr_drainers; /* WQ: drain in progress */
247 int saved_max_active; /* WQ: saved pwq max_active */
248
249 struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */
250 struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */
251
252 #ifdef CONFIG_SYSFS
253 struct wq_device *wq_dev; /* I: for sysfs interface */
254 #endif
255 #ifdef CONFIG_LOCKDEP
256 struct lockdep_map lockdep_map;
257 #endif
258 char name[WQ_NAME_LEN]; /* I: workqueue name */
259
260 /* hot fields used during command issue, aligned to cacheline */
261 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
262 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
263 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* WR: unbound pwqs indexed by node */
264 };
265
266 static struct kmem_cache *pwq_cache;
267
268 static cpumask_var_t *wq_numa_possible_cpumask;
269 /* possible CPUs of each node */
270
271 static bool wq_disable_numa;
272 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
273
274 /* see the comment above the definition of WQ_POWER_EFFICIENT */
275 #ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
276 static bool wq_power_efficient = true;
277 #else
278 static bool wq_power_efficient;
279 #endif
280
281 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
282
283 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
284
285 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
286 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
287
288 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
289 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
290
291 static LIST_HEAD(workqueues); /* PL: list of all workqueues */
292 static bool workqueue_freezing; /* PL: have wqs started freezing? */
293
294 /* the per-cpu worker pools */
295 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
296 cpu_worker_pools);
297
298 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
299
300 /* PL: hash of all unbound pools keyed by pool->attrs */
301 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
302
303 /* I: attributes used when instantiating standard unbound pools on demand */
304 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
305
306 /* I: attributes used when instantiating ordered pools on demand */
307 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
308
309 struct workqueue_struct *system_wq __read_mostly;
310 EXPORT_SYMBOL(system_wq);
311 struct workqueue_struct *system_highpri_wq __read_mostly;
312 EXPORT_SYMBOL_GPL(system_highpri_wq);
313 struct workqueue_struct *system_long_wq __read_mostly;
314 EXPORT_SYMBOL_GPL(system_long_wq);
315 struct workqueue_struct *system_unbound_wq __read_mostly;
316 EXPORT_SYMBOL_GPL(system_unbound_wq);
317 struct workqueue_struct *system_freezable_wq __read_mostly;
318 EXPORT_SYMBOL_GPL(system_freezable_wq);
319 struct workqueue_struct *system_power_efficient_wq __read_mostly;
320 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
321 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
322 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
323
324 static int worker_thread(void *__worker);
325 static void copy_workqueue_attrs(struct workqueue_attrs *to,
326 const struct workqueue_attrs *from);
327
328 #define CREATE_TRACE_POINTS
329 #include <trace/events/workqueue.h>
330
331 #define assert_rcu_or_pool_mutex() \
332 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
333 lockdep_is_held(&wq_pool_mutex), \
334 "sched RCU or wq_pool_mutex should be held")
335
336 #define assert_rcu_or_wq_mutex(wq) \
337 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
338 lockdep_is_held(&wq->mutex), \
339 "sched RCU or wq->mutex should be held")
340
341 #define for_each_cpu_worker_pool(pool, cpu) \
342 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
343 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
344 (pool)++)
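
/*
 * Example (illustrative sketch): walking both standard pools (normal
 * and highpri) of one CPU. The counters are L: protected, so a real
 * caller would hold pool->lock while reading them:
 *
 *	for_each_cpu_worker_pool(pool, cpu)
 *		pr_info("pool %d: %d workers, %d idle\n",
 *			pool->id, pool->nr_workers, pool->nr_idle);
 */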
345
346 /**
347 * for_each_pool - iterate through all worker_pools in the system
348 * @pool: iteration cursor
349 * @pi: integer used for iteration
350 *
351 * This must be called either with wq_pool_mutex held or sched RCU read
352 * locked. If the pool needs to be used beyond the locking in effect, the
353 * caller is responsible for guaranteeing that the pool stays online.
354 *
355 * The if/else clause exists only for the lockdep assertion and can be
356 * ignored.
357 */
358 #define for_each_pool(pool, pi) \
359 idr_for_each_entry(&worker_pool_idr, pool, pi) \
360 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
361 else
362
363 /**
364 * for_each_pool_worker - iterate through all workers of a worker_pool
365 * @worker: iteration cursor
366 * @pool: worker_pool to iterate workers of
367 *
368 * This must be called with @pool->attach_mutex held.
369 *
370 * The if/else clause exists only for the lockdep assertion and can be
371 * ignored.
372 */
373 #define for_each_pool_worker(worker, pool) \
374 list_for_each_entry((worker), &(pool)->workers, node) \
375 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
376 else
377
378 /**
379 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
380 * @pwq: iteration cursor
381 * @wq: the target workqueue
382 *
383 * This must be called either with wq->mutex held or sched RCU read locked.
384 * If the pwq needs to be used beyond the locking in effect, the caller is
385 * responsible for guaranteeing that the pwq stays online.
386 *
387 * The if/else clause exists only for the lockdep assertion and can be
388 * ignored.
389 */
390 #define for_each_pwq(pwq, wq) \
391 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
392 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
393 else
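
/*
 * Example (illustrative sketch): checking whether any pwq of @wq has
 * active work items. Sched-RCU satisfies the rule above; the
 * nr_active reads are racy, which is fine for a heuristic:
 *
 *	rcu_read_lock_sched();
 *	for_each_pwq(pwq, wq)
 *		busy |= pwq->nr_active > 0;
 *	rcu_read_unlock_sched();
 */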
394
395 #ifdef CONFIG_DEBUG_OBJECTS_WORK
396
397 static struct debug_obj_descr work_debug_descr;
398
399 static void *work_debug_hint(void *addr)
400 {
401 return ((struct work_struct *) addr)->func;
402 }
403
404 /*
405 * fixup_init is called when:
406 * - an active object is initialized
407 */
408 static int work_fixup_init(void *addr, enum debug_obj_state state)
409 {
410 struct work_struct *work = addr;
411
412 switch (state) {
413 case ODEBUG_STATE_ACTIVE:
414 cancel_work_sync(work);
415 debug_object_init(work, &work_debug_descr);
416 return 1;
417 default:
418 return 0;
419 }
420 }
421
422 /*
423 * fixup_activate is called when:
424 * - an active object is activated
425 * - an unknown object is activated (might be a statically initialized object)
426 */
427 static int work_fixup_activate(void *addr, enum debug_obj_state state)
428 {
429 struct work_struct *work = addr;
430
431 switch (state) {
432
433 case ODEBUG_STATE_NOTAVAILABLE:
434 /*
435 * This is not really a fixup. The work struct was
436 * statically initialized. We just make sure that it
437 * is tracked in the object tracker.
438 */
439 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
440 debug_object_init(work, &work_debug_descr);
441 debug_object_activate(work, &work_debug_descr);
442 return 0;
443 }
444 WARN_ON_ONCE(1);
445 return 0;
446
447 case ODEBUG_STATE_ACTIVE:
448 WARN_ON(1);
449
450 default:
451 return 0;
452 }
453 }
454
455 /*
456 * fixup_free is called when:
457 * - an active object is freed
458 */
459 static int work_fixup_free(void *addr, enum debug_obj_state state)
460 {
461 struct work_struct *work = addr;
462
463 switch (state) {
464 case ODEBUG_STATE_ACTIVE:
465 cancel_work_sync(work);
466 debug_object_free(work, &work_debug_descr);
467 return 1;
468 default:
469 return 0;
470 }
471 }
472
473 static struct debug_obj_descr work_debug_descr = {
474 .name = "work_struct",
475 .debug_hint = work_debug_hint,
476 .fixup_init = work_fixup_init,
477 .fixup_activate = work_fixup_activate,
478 .fixup_free = work_fixup_free,
479 };
480
481 static inline void debug_work_activate(struct work_struct *work)
482 {
483 debug_object_activate(work, &work_debug_descr);
484 }
485
486 static inline void debug_work_deactivate(struct work_struct *work)
487 {
488 debug_object_deactivate(work, &work_debug_descr);
489 }
490
491 void __init_work(struct work_struct *work, int onstack)
492 {
493 if (onstack)
494 debug_object_init_on_stack(work, &work_debug_descr);
495 else
496 debug_object_init(work, &work_debug_descr);
497 }
498 EXPORT_SYMBOL_GPL(__init_work);
499
500 void destroy_work_on_stack(struct work_struct *work)
501 {
502 debug_object_free(work, &work_debug_descr);
503 }
504 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
505
506 void destroy_delayed_work_on_stack(struct delayed_work *work)
507 {
508 destroy_timer_on_stack(&work->timer);
509 debug_object_free(&work->work, &work_debug_descr);
510 }
511 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
512
513 #else
514 static inline void debug_work_activate(struct work_struct *work) { }
515 static inline void debug_work_deactivate(struct work_struct *work) { }
516 #endif
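
/*
 * Example (illustrative sketch; my_work_fn is hypothetical): with
 * CONFIG_DEBUG_OBJECTS_WORK, on-stack work items must use the
 * _ONSTACK initializers and be destroyed before the frame goes away:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, my_work_fn);
 *	schedule_work(&work);
 *	flush_work(&work);		// wait for completion
 *	destroy_work_on_stack(&work);
 */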
517
518 /**
519 * worker_pool_assign_id - allocate ID and assign it to @pool
520 * @pool: the pool pointer of interest
521 *
522 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
523 * successfully, -errno on failure.
524 */
525 static int worker_pool_assign_id(struct worker_pool *pool)
526 {
527 int ret;
528
529 lockdep_assert_held(&wq_pool_mutex);
530
531 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
532 GFP_KERNEL);
533 if (ret >= 0) {
534 pool->id = ret;
535 return 0;
536 }
537 return ret;
538 }
539
540 /**
541 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
542 * @wq: the target workqueue
543 * @node: the node ID
544 *
545 * This must be called either with wq->mutex held or sched RCU read locked.
546 * If the pwq needs to be used beyond the locking in effect, the caller is
547 * responsible for guaranteeing that the pwq stays online.
548 *
549 * Return: The unbound pool_workqueue for @node.
550 */
551 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
552 int node)
553 {
554 assert_rcu_or_wq_mutex(wq);
555 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
556 }
557
558 static unsigned int work_color_to_flags(int color)
559 {
560 return color << WORK_STRUCT_COLOR_SHIFT;
561 }
562
563 static int get_work_color(struct work_struct *work)
564 {
565 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
566 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
567 }
568
569 static int work_next_color(int color)
570 {
571 return (color + 1) % WORK_NR_COLORS;
572 }
573
574 /*
575 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
576 * contain the pointer to the queued pwq. Once execution starts, the flag
577 * is cleared and the high bits contain OFFQ flags and pool ID.
578 *
579 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
580 * and clear_work_data() can be used to set the pwq, pool or clear
581 * work->data. These functions should only be called while the work is
582 * owned - ie. while the PENDING bit is set.
583 *
584 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
585 * corresponding to a work. Pool is available once the work has been
586 * queued anywhere after initialization until it is sync canceled. pwq is
587 * available only while the work item is queued.
588 *
589 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
590 * canceled. While being canceled, a work item may have its PENDING set
591 * but stay off timer and worklist for arbitrarily long and nobody should
592 * try to steal the PENDING bit.
593 */
594 static inline void set_work_data(struct work_struct *work, unsigned long data,
595 unsigned long flags)
596 {
597 WARN_ON_ONCE(!work_pending(work));
598 atomic_long_set(&work->data, data | flags | work_static(work));
599 }
600
601 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
602 unsigned long extra_flags)
603 {
604 set_work_data(work, (unsigned long)pwq,
605 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
606 }
607
608 static void set_work_pool_and_keep_pending(struct work_struct *work,
609 int pool_id)
610 {
611 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
612 WORK_STRUCT_PENDING);
613 }
614
615 static void set_work_pool_and_clear_pending(struct work_struct *work,
616 int pool_id)
617 {
618 /*
619 * The following wmb is paired with the implied mb in
620 * test_and_set_bit(PENDING) and ensures all updates to @work made
621 * here are visible to and precede any updates by the next PENDING
622 * owner.
623 */
624 smp_wmb();
625 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
626 /*
627 * The following mb guarantees that previous clear of a PENDING bit
628 * will not be reordered with any speculative LOADS or STORES from
629 * work->current_func, which is executed afterwards. This possible
630 * reordering can lead to a missed execution on attempt to queue
631 * the same @work. E.g. consider this case:
632 *
633 * CPU#0 CPU#1
634 * ---------------------------- --------------------------------
635 *
636 * 1 STORE event_indicated
637 * 2 queue_work_on() {
638 * 3 test_and_set_bit(PENDING)
639 * 4 } set_..._and_clear_pending() {
640 * 5 set_work_data() # clear bit
641 * 6 smp_mb()
642 * 7 work->current_func() {
643 * 8 LOAD event_indicated
644 * }
645 *
646 * Without an explicit full barrier speculative LOAD on line 8 can
647 * be executed before CPU#0 does STORE on line 1. If that happens,
648 * CPU#0 observes the PENDING bit is still set and new execution of
649 * a @work is not queued in the hope that CPU#1 will eventually
650 * finish the queued @work. Meanwhile CPU#1 does not see
651 * event_indicated is set, because speculative LOAD was executed
652 * before actual STORE.
653 */
654 smp_mb();
655 }
656
657 static void clear_work_data(struct work_struct *work)
658 {
659 smp_wmb(); /* see set_work_pool_and_clear_pending() */
660 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
661 }
662
663 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
664 {
665 unsigned long data = atomic_long_read(&work->data);
666
667 if (data & WORK_STRUCT_PWQ)
668 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
669 else
670 return NULL;
671 }
672
673 /**
674 * get_work_pool - return the worker_pool a given work was associated with
675 * @work: the work item of interest
676 *
677 * Pools are created and destroyed under wq_pool_mutex, and allow read
678 * access under sched-RCU read lock. As such, this function should be
679 * called under wq_pool_mutex or with preemption disabled.
680 *
681 * All fields of the returned pool are accessible as long as the above
682 * mentioned locking is in effect. If the returned pool needs to be used
683 * beyond the critical section, the caller is responsible for ensuring the
684 * returned pool is and stays online.
685 *
686 * Return: The worker_pool @work was last associated with. %NULL if none.
687 */
688 static struct worker_pool *get_work_pool(struct work_struct *work)
689 {
690 unsigned long data = atomic_long_read(&work->data);
691 int pool_id;
692
693 assert_rcu_or_pool_mutex();
694
695 if (data & WORK_STRUCT_PWQ)
696 return ((struct pool_workqueue *)
697 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
698
699 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
700 if (pool_id == WORK_OFFQ_POOL_NONE)
701 return NULL;
702
703 return idr_find(&worker_pool_idr, pool_id);
704 }
705
706 /**
707 * get_work_pool_id - return the worker pool ID a given work is associated with
708 * @work: the work item of interest
709 *
710 * Return: The worker_pool ID @work was last associated with.
711 * %WORK_OFFQ_POOL_NONE if none.
712 */
713 static int get_work_pool_id(struct work_struct *work)
714 {
715 unsigned long data = atomic_long_read(&work->data);
716
717 if (data & WORK_STRUCT_PWQ)
718 return ((struct pool_workqueue *)
719 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
720
721 return data >> WORK_OFFQ_POOL_SHIFT;
722 }
723
724 static void mark_work_canceling(struct work_struct *work)
725 {
726 unsigned long pool_id = get_work_pool_id(work);
727
728 pool_id <<= WORK_OFFQ_POOL_SHIFT;
729 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
730 }
731
732 static bool work_is_canceling(struct work_struct *work)
733 {
734 unsigned long data = atomic_long_read(&work->data);
735
736 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
737 }
738
739 /*
740 * Policy functions. These define the policies on how the global worker
741 * pools are managed. Unless noted otherwise, these functions assume that
742 * they're being called with pool->lock held.
743 */
744
745 static bool __need_more_worker(struct worker_pool *pool)
746 {
747 return !atomic_read(&pool->nr_running);
748 }
749
750 /*
751 * Need to wake up a worker? Called from anything but currently
752 * running workers.
753 *
754 * Note that, because unbound workers never contribute to nr_running, this
755 * function will always return %true for unbound pools as long as the
756 * worklist isn't empty.
757 */
758 static bool need_more_worker(struct worker_pool *pool)
759 {
760 return !list_empty(&pool->worklist) && __need_more_worker(pool);
761 }
762
763 /* Can I start working? Called from busy but !running workers. */
764 static bool may_start_working(struct worker_pool *pool)
765 {
766 return pool->nr_idle;
767 }
768
769 /* Do I need to keep working? Called from currently running workers. */
770 static bool keep_working(struct worker_pool *pool)
771 {
772 return !list_empty(&pool->worklist) &&
773 atomic_read(&pool->nr_running) <= 1;
774 }
775
776 /* Do we need a new worker? Called from manager. */
777 static bool need_to_create_worker(struct worker_pool *pool)
778 {
779 return need_more_worker(pool) && !may_start_working(pool);
780 }
781
782 /* Do we have too many workers and should some go away? */
783 static bool too_many_workers(struct worker_pool *pool)
784 {
785 bool managing = mutex_is_locked(&pool->manager_arb);
786 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
787 int nr_busy = pool->nr_workers - nr_idle;
788
789 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
790 }
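
/*
 * Example: with MAX_IDLE_WORKERS_RATIO == 4, a pool with 12 busy
 * workers tolerates 4 idle ones; a fifth idle worker makes
 * (5 - 2) * 4 >= 12 true and the idle timer may start trimming.
 */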
791
792 /*
793 * Wake up functions.
794 */
795
796 /* Return the first idle worker. Safe with preemption disabled */
797 static struct worker *first_idle_worker(struct worker_pool *pool)
798 {
799 if (unlikely(list_empty(&pool->idle_list)))
800 return NULL;
801
802 return list_first_entry(&pool->idle_list, struct worker, entry);
803 }
804
805 /**
806 * wake_up_worker - wake up an idle worker
807 * @pool: worker pool to wake worker from
808 *
809 * Wake up the first idle worker of @pool.
810 *
811 * CONTEXT:
812 * spin_lock_irq(pool->lock).
813 */
814 static void wake_up_worker(struct worker_pool *pool)
815 {
816 struct worker *worker = first_idle_worker(pool);
817
818 if (likely(worker))
819 wake_up_process(worker->task);
820 }
821
822 /**
823 * wq_worker_waking_up - a worker is waking up
824 * @task: task waking up
825 * @cpu: CPU @task is waking up to
826 *
827 * This function is called during try_to_wake_up() when a worker is
828 * being awoken.
829 *
830 * CONTEXT:
831 * spin_lock_irq(rq->lock)
832 */
833 void wq_worker_waking_up(struct task_struct *task, int cpu)
834 {
835 struct worker *worker = kthread_data(task);
836
837 if (!(worker->flags & WORKER_NOT_RUNNING)) {
838 WARN_ON_ONCE(worker->pool->cpu != cpu);
839 atomic_inc(&worker->pool->nr_running);
840 }
841 }
842
843 /**
844 * wq_worker_sleeping - a worker is going to sleep
845 * @task: task going to sleep
846 * @cpu: CPU in question, must be the current CPU number
847 *
848 * This function is called during schedule() when a busy worker is
849 * going to sleep. A worker on the same cpu can be woken up by
850 * returning a pointer to its task.
851 *
852 * CONTEXT:
853 * spin_lock_irq(rq->lock)
854 *
855 * Return:
856 * Worker task on @cpu to wake up, %NULL if none.
857 */
858 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
859 {
860 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
861 struct worker_pool *pool;
862
863 /*
864 * Rescuers, which may not have all the fields set up like normal
865 * workers, also reach here, let's not access anything before
866 * checking NOT_RUNNING.
867 */
868 if (worker->flags & WORKER_NOT_RUNNING)
869 return NULL;
870
871 pool = worker->pool;
872
873 /* this can only happen on the local cpu */
874 if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
875 return NULL;
876
877 /*
878 * The counterpart of the following dec_and_test, implied mb,
879 * worklist not empty test sequence is in insert_work().
880 * Please read comment there.
881 *
882 * NOT_RUNNING is clear. This means that we're bound to and
883 * running on the local cpu w/ rq lock held and preemption
884 * disabled, which in turn means that nobody else could be
885 * manipulating idle_list, so dereferencing idle_list without pool
886 * lock is safe.
887 */
888 if (atomic_dec_and_test(&pool->nr_running) &&
889 !list_empty(&pool->worklist))
890 to_wakeup = first_idle_worker(pool);
891 return to_wakeup ? to_wakeup->task : NULL;
892 }
893
894 /**
895 * worker_set_flags - set worker flags and adjust nr_running accordingly
896 * @worker: self
897 * @flags: flags to set
898 *
899 * Set @flags in @worker->flags and adjust nr_running accordingly.
900 *
901 * CONTEXT:
902 * spin_lock_irq(pool->lock)
903 */
904 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
905 {
906 struct worker_pool *pool = worker->pool;
907
908 WARN_ON_ONCE(worker->task != current);
909
910 /* If transitioning into NOT_RUNNING, adjust nr_running. */
911 if ((flags & WORKER_NOT_RUNNING) &&
912 !(worker->flags & WORKER_NOT_RUNNING)) {
913 atomic_dec(&pool->nr_running);
914 }
915
916 worker->flags |= flags;
917 }
918
919 /**
920 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
921 * @worker: self
922 * @flags: flags to clear
923 *
924 * Clear @flags in @worker->flags and adjust nr_running accordingly.
925 *
926 * CONTEXT:
927 * spin_lock_irq(pool->lock)
928 */
929 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
930 {
931 struct worker_pool *pool = worker->pool;
932 unsigned int oflags = worker->flags;
933
934 WARN_ON_ONCE(worker->task != current);
935
936 worker->flags &= ~flags;
937
938 /*
939 * If transitioning out of NOT_RUNNING, increment nr_running. Note
940 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
941 * of multiple flags, not a single flag.
942 */
943 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
944 if (!(worker->flags & WORKER_NOT_RUNNING))
945 atomic_inc(&pool->nr_running);
946 }
947
948 /**
949 * find_worker_executing_work - find worker which is executing a work
950 * @pool: pool of interest
951 * @work: work to find worker for
952 *
953 * Find a worker which is executing @work on @pool by searching
954 * @pool->busy_hash which is keyed by the address of @work. For a worker
955 * to match, its current execution should match the address of @work and
956 * its work function. This is to avoid unwanted dependency between
957 * unrelated work executions through a work item being recycled while still
958 * being executed.
959 *
960 * This is a bit tricky. A work item may be freed once its execution
961 * starts and nothing prevents the freed area from being recycled for
962 * another work item. If the same work item address ends up being reused
963 * before the original execution finishes, workqueue will identify the
964 * recycled work item as currently executing and make it wait until the
965 * current execution finishes, introducing an unwanted dependency.
966 *
967 * This function checks the work item address and work function to avoid
968 * false positives. Note that this isn't complete as one may construct a
969 * work function which can introduce dependency onto itself through a
970 * recycled work item. Well, if somebody wants to shoot oneself in the
971 * foot that badly, there's only so much we can do, and if such deadlock
972 * actually occurs, it should be easy to locate the culprit work function.
973 *
974 * CONTEXT:
975 * spin_lock_irq(pool->lock).
976 *
977 * Return:
978 * Pointer to worker which is executing @work if found, %NULL
979 * otherwise.
980 */
981 static struct worker *find_worker_executing_work(struct worker_pool *pool,
982 struct work_struct *work)
983 {
984 struct worker *worker;
985
986 hash_for_each_possible(pool->busy_hash, worker, hentry,
987 (unsigned long)work)
988 if (worker->current_work == work &&
989 worker->current_func == work->func)
990 return worker;
991
992 return NULL;
993 }
994
995 /**
996 * move_linked_works - move linked works to a list
997 * @work: start of series of works to be scheduled
998 * @head: target list to append @work to
999 * @nextp: out parameter for nested worklist walking
1000 *
1001 * Schedule linked works starting from @work to @head. Work series to
1002 * be scheduled starts at @work and includes any consecutive work with
1003 * WORK_STRUCT_LINKED set in its predecessor.
1004 *
1005 * If @nextp is not NULL, it's updated to point to the next work of
1006 * the last scheduled work. This allows move_linked_works() to be
1007 * nested inside outer list_for_each_entry_safe().
1008 *
1009 * CONTEXT:
1010 * spin_lock_irq(pool->lock).
1011 */
1012 static void move_linked_works(struct work_struct *work, struct list_head *head,
1013 struct work_struct **nextp)
1014 {
1015 struct work_struct *n;
1016
1017 /*
1018 * Linked worklist will always end before the end of the list,
1019 * use NULL for list head.
1020 */
1021 list_for_each_entry_safe_from(work, n, NULL, entry) {
1022 list_move_tail(&work->entry, head);
1023 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1024 break;
1025 }
1026
1027 /*
1028 * If we're already inside safe list traversal and have moved
1029 * multiple works to the scheduled queue, the next position
1030 * needs to be updated.
1031 */
1032 if (nextp)
1033 *nextp = n;
1034 }
1035
1036 /**
1037 * get_pwq - get an extra reference on the specified pool_workqueue
1038 * @pwq: pool_workqueue to get
1039 *
1040 * Obtain an extra reference on @pwq. The caller should guarantee that
1041 * @pwq has positive refcnt and be holding the matching pool->lock.
1042 */
1043 static void get_pwq(struct pool_workqueue *pwq)
1044 {
1045 lockdep_assert_held(&pwq->pool->lock);
1046 WARN_ON_ONCE(pwq->refcnt <= 0);
1047 pwq->refcnt++;
1048 }
1049
1050 /**
1051 * put_pwq - put a pool_workqueue reference
1052 * @pwq: pool_workqueue to put
1053 *
1054 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1055 * destruction. The caller should be holding the matching pool->lock.
1056 */
1057 static void put_pwq(struct pool_workqueue *pwq)
1058 {
1059 lockdep_assert_held(&pwq->pool->lock);
1060 if (likely(--pwq->refcnt))
1061 return;
1062 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1063 return;
1064 /*
1065 * @pwq can't be released under pool->lock, bounce to
1066 * pwq_unbound_release_workfn(). This never recurses on the same
1067 * pool->lock as this path is taken only for unbound workqueues and
1068 * the release work item is scheduled on a per-cpu workqueue. To
1069 * avoid lockdep warning, unbound pool->locks are given lockdep
1070 * subclass of 1 in get_unbound_pool().
1071 */
1072 schedule_work(&pwq->unbound_release_work);
1073 }
1074
1075 /**
1076 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1077 * @pwq: pool_workqueue to put (can be %NULL)
1078 *
1079 * put_pwq() with locking. This function also allows %NULL @pwq.
1080 */
1081 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1082 {
1083 if (pwq) {
1084 /*
1085 * As both pwqs and pools are sched-RCU protected, the
1086 * following lock operations are safe.
1087 */
1088 spin_lock_irq(&pwq->pool->lock);
1089 put_pwq(pwq);
1090 spin_unlock_irq(&pwq->pool->lock);
1091 }
1092 }
1093
1094 static void pwq_activate_delayed_work(struct work_struct *work)
1095 {
1096 struct pool_workqueue *pwq = get_work_pwq(work);
1097
1098 trace_workqueue_activate_work(work);
1099 move_linked_works(work, &pwq->pool->worklist, NULL);
1100 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1101 pwq->nr_active++;
1102 }
1103
1104 static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1105 {
1106 struct work_struct *work = list_first_entry(&pwq->delayed_works,
1107 struct work_struct, entry);
1108
1109 pwq_activate_delayed_work(work);
1110 }
1111
1112 /**
1113 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1114 * @pwq: pwq of interest
1115 * @color: color of work which left the queue
1116 *
1117 * A work either has completed or is removed from pending queue,
1118 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1119 *
1120 * CONTEXT:
1121 * spin_lock_irq(pool->lock).
1122 */
1123 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1124 {
1125 /* uncolored work items don't participate in flushing or nr_active */
1126 if (color == WORK_NO_COLOR)
1127 goto out_put;
1128
1129 pwq->nr_in_flight[color]--;
1130
1131 pwq->nr_active--;
1132 if (!list_empty(&pwq->delayed_works)) {
1133 /* one down, submit a delayed one */
1134 if (pwq->nr_active < pwq->max_active)
1135 pwq_activate_first_delayed(pwq);
1136 }
1137
1138 /* is flush in progress and are we at the flushing tip? */
1139 if (likely(pwq->flush_color != color))
1140 goto out_put;
1141
1142 /* are there still in-flight works? */
1143 if (pwq->nr_in_flight[color])
1144 goto out_put;
1145
1146 /* this pwq is done, clear flush_color */
1147 pwq->flush_color = -1;
1148
1149 /*
1150 * If this was the last pwq, wake up the first flusher. It
1151 * will handle the rest.
1152 */
1153 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1154 complete(&pwq->wq->first_flusher->done);
1155 out_put:
1156 put_pwq(pwq);
1157 }
1158
1159 /**
1160 * try_to_grab_pending - steal work item from worklist and disable irq
1161 * @work: work item to steal
1162 * @is_dwork: @work is a delayed_work
1163 * @flags: place to store irq state
1164 *
1165 * Try to grab PENDING bit of @work. This function can handle @work in any
1166 * stable state - idle, on timer or on worklist.
1167 *
1168 * Return:
1169 * 1 if @work was pending and we successfully stole PENDING
1170 * 0 if @work was idle and we claimed PENDING
1171 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1172 * -ENOENT if someone else is canceling @work, this state may persist
1173 * for arbitrarily long
1174 *
1175 * Note:
1176 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1177 * interrupted while holding PENDING and @work off queue, irq must be
1178 * disabled on entry. This, combined with delayed_work->timer being
1179 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
1180 *
1181 * On successful return, >= 0, irq is disabled and the caller is
1182 * responsible for releasing it using local_irq_restore(*@flags).
1183 *
1184 * This function is safe to call from any context including IRQ handler.
1185 */
1186 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1187 unsigned long *flags)
1188 {
1189 struct worker_pool *pool;
1190 struct pool_workqueue *pwq;
1191
1192 local_irq_save(*flags);
1193
1194 /* try to steal the timer if it exists */
1195 if (is_dwork) {
1196 struct delayed_work *dwork = to_delayed_work(work);
1197
1198 /*
1199 * dwork->timer is irqsafe. If del_timer() fails, it's
1200 * guaranteed that the timer is not queued anywhere and not
1201 * running on the local CPU.
1202 */
1203 if (likely(del_timer(&dwork->timer)))
1204 return 1;
1205 }
1206
1207 /* try to claim PENDING the normal way */
1208 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1209 return 0;
1210
1211 /*
1212 * The queueing is in progress, or it is already queued. Try to
1213 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1214 */
1215 pool = get_work_pool(work);
1216 if (!pool)
1217 goto fail;
1218
1219 spin_lock(&pool->lock);
1220 /*
1221 * work->data is guaranteed to point to pwq only while the work
1222 * item is queued on pwq->wq, and both updating work->data to point
1223 * to pwq on queueing and to pool on dequeueing are done under
1224 * pwq->pool->lock. This in turn guarantees that, if work->data
1225 * points to pwq which is associated with a locked pool, the work
1226 * item is currently queued on that pool.
1227 */
1228 pwq = get_work_pwq(work);
1229 if (pwq && pwq->pool == pool) {
1230 debug_work_deactivate(work);
1231
1232 /*
1233 * A delayed work item cannot be grabbed directly because
1234 * it might have linked NO_COLOR work items which, if left
1235 * on the delayed_list, will confuse pwq->nr_active
1236 * management later on and cause stall. Make sure the work
1237 * item is activated before grabbing.
1238 */
1239 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1240 pwq_activate_delayed_work(work);
1241
1242 list_del_init(&work->entry);
1243 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1244
1245 /* work->data points to pwq iff queued, point to pool */
1246 set_work_pool_and_keep_pending(work, pool->id);
1247
1248 spin_unlock(&pool->lock);
1249 return 1;
1250 }
1251 spin_unlock(&pool->lock);
1252 fail:
1253 local_irq_restore(*flags);
1254 if (work_is_canceling(work))
1255 return -ENOENT;
1256 cpu_relax();
1257 return -EAGAIN;
1258 }
1259
1260 /**
1261 * insert_work - insert a work into a pool
1262 * @pwq: pwq @work belongs to
1263 * @work: work to insert
1264 * @head: insertion point
1265 * @extra_flags: extra WORK_STRUCT_* flags to set
1266 *
1267 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1268 * work_struct flags.
1269 *
1270 * CONTEXT:
1271 * spin_lock_irq(pool->lock).
1272 */
1273 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1274 struct list_head *head, unsigned int extra_flags)
1275 {
1276 struct worker_pool *pool = pwq->pool;
1277
1278 /* we own @work, set data and link */
1279 set_work_pwq(work, pwq, extra_flags);
1280 list_add_tail(&work->entry, head);
1281 get_pwq(pwq);
1282
1283 /*
1284 * Ensure either wq_worker_sleeping() sees the above
1285 * list_add_tail() or we see zero nr_running to avoid workers lying
1286 * around lazily while there are works to be processed.
1287 */
1288 smp_mb();
1289
1290 if (__need_more_worker(pool))
1291 wake_up_worker(pool);
1292 }
1293
1294 /*
1295 * Test whether @work is being queued from another work executing on the
1296 * same workqueue.
1297 */
1298 static bool is_chained_work(struct workqueue_struct *wq)
1299 {
1300 struct worker *worker;
1301
1302 worker = current_wq_worker();
1303 /*
1304 * Return %true iff I'm a worker executing a work item on @wq. If
1305 * I'm @worker, it's safe to dereference it without locking.
1306 */
1307 return worker && worker->current_pwq->wq == wq;
1308 }
1309
1310 static void __queue_work(int cpu, struct workqueue_struct *wq,
1311 struct work_struct *work)
1312 {
1313 struct pool_workqueue *pwq;
1314 struct worker_pool *last_pool;
1315 struct list_head *worklist;
1316 unsigned int work_flags;
1317 unsigned int req_cpu = cpu;
1318
1319 /*
1320 * While a work item is PENDING && off queue, a task trying to
1321 * steal the PENDING will busy-loop waiting for it to either get
1322 * queued or lose PENDING. Grabbing PENDING and queueing should
1323 * happen with IRQ disabled.
1324 */
1325 WARN_ON_ONCE(!irqs_disabled());
1326
1327 debug_work_activate(work);
1328
1329 /* if draining, only works from the same workqueue are allowed */
1330 if (unlikely(wq->flags & __WQ_DRAINING) &&
1331 WARN_ON_ONCE(!is_chained_work(wq)))
1332 return;
1333 retry:
1334 if (req_cpu == WORK_CPU_UNBOUND)
1335 cpu = raw_smp_processor_id();
1336
1337 /* pwq which will be used unless @work is executing elsewhere */
1338 if (!(wq->flags & WQ_UNBOUND))
1339 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1340 else
1341 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1342
1343 /*
1344 * If @work was previously on a different pool, it might still be
1345 * running there, in which case the work needs to be queued on that
1346 * pool to guarantee non-reentrancy.
1347 */
1348 last_pool = get_work_pool(work);
1349 if (last_pool && last_pool != pwq->pool) {
1350 struct worker *worker;
1351
1352 spin_lock(&last_pool->lock);
1353
1354 worker = find_worker_executing_work(last_pool, work);
1355
1356 if (worker && worker->current_pwq->wq == wq) {
1357 pwq = worker->current_pwq;
1358 } else {
1359 /* meh... not running there, queue here */
1360 spin_unlock(&last_pool->lock);
1361 spin_lock(&pwq->pool->lock);
1362 }
1363 } else {
1364 spin_lock(&pwq->pool->lock);
1365 }
1366
1367 /*
1368 * pwq is determined and locked. For unbound pools, we could have
1369 * raced with pwq release and it could already be dead. If its
1370 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1371 * without another pwq replacing it in the numa_pwq_tbl or while
1372 * work items are executing on it, so the retrying is guaranteed to
1373 * make forward-progress.
1374 */
1375 if (unlikely(!pwq->refcnt)) {
1376 if (wq->flags & WQ_UNBOUND) {
1377 spin_unlock(&pwq->pool->lock);
1378 cpu_relax();
1379 goto retry;
1380 }
1381 /* oops */
1382 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1383 wq->name, cpu);
1384 }
1385
1386 /* pwq determined, queue */
1387 trace_workqueue_queue_work(req_cpu, pwq, work);
1388
1389 if (WARN_ON(!list_empty(&work->entry))) {
1390 spin_unlock(&pwq->pool->lock);
1391 return;
1392 }
1393
1394 pwq->nr_in_flight[pwq->work_color]++;
1395 work_flags = work_color_to_flags(pwq->work_color);
1396
1397 if (likely(pwq->nr_active < pwq->max_active)) {
1398 trace_workqueue_activate_work(work);
1399 pwq->nr_active++;
1400 worklist = &pwq->pool->worklist;
1401 } else {
1402 work_flags |= WORK_STRUCT_DELAYED;
1403 worklist = &pwq->delayed_works;
1404 }
1405
1406 insert_work(pwq, work, worklist, work_flags);
1407
1408 spin_unlock(&pwq->pool->lock);
1409 }
1410
1411 /**
1412 * queue_work_on - queue work on specific cpu
1413 * @cpu: CPU number to execute work on
1414 * @wq: workqueue to use
1415 * @work: work to queue
1416 *
1417 * We queue the work to a specific CPU, the caller must ensure it
1418 * can't go away.
1419 *
1420 * Return: %false if @work was already on a queue, %true otherwise.
1421 */
1422 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1423 struct work_struct *work)
1424 {
1425 bool ret = false;
1426 unsigned long flags;
1427
1428 local_irq_save(flags);
1429
1430 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1431 __queue_work(cpu, wq, work);
1432 ret = true;
1433 }
1434
1435 local_irq_restore(flags);
1436 return ret;
1437 }
1438 EXPORT_SYMBOL(queue_work_on);
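
/*
 * Example (illustrative sketch; the my_* names are hypothetical):
 * keeping follow-up processing on the CPU that took the interrupt.
 * The handler runs with preemption disabled, so smp_processor_id()
 * is stable:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		queue_work_on(smp_processor_id(), system_wq, &dev->work);
 *		return IRQ_HANDLED;
 *	}
 */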
1439
1440 void delayed_work_timer_fn(unsigned long __data)
1441 {
1442 struct delayed_work *dwork = (struct delayed_work *)__data;
1443
1444 /* should have been called from irqsafe timer with irq already off */
1445 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1446 }
1447 EXPORT_SYMBOL(delayed_work_timer_fn);
1448
1449 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1450 struct delayed_work *dwork, unsigned long delay)
1451 {
1452 struct timer_list *timer = &dwork->timer;
1453 struct work_struct *work = &dwork->work;
1454
1455 WARN_ON_ONCE(!wq);
1456 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1457 timer->data != (unsigned long)dwork);
1458 WARN_ON_ONCE(timer_pending(timer));
1459 WARN_ON_ONCE(!list_empty(&work->entry));
1460
1461 /*
1462 * If @delay is 0, queue @dwork->work immediately. This is for
1463 * both optimization and correctness. The earliest @timer can
1464 * expire is on the closest next tick and delayed_work users depend
1465 * on there being no such delay when @delay is 0.
1466 */
1467 if (!delay) {
1468 __queue_work(cpu, wq, &dwork->work);
1469 return;
1470 }
1471
1472 timer_stats_timer_set_start_info(&dwork->timer);
1473
1474 dwork->wq = wq;
1475 dwork->cpu = cpu;
1476 timer->expires = jiffies + delay;
1477
1478 if (unlikely(cpu != WORK_CPU_UNBOUND))
1479 add_timer_on(timer, cpu);
1480 else
1481 add_timer(timer);
1482 }
1483
1484 /**
1485 * queue_delayed_work_on - queue work on specific CPU after delay
1486 * @cpu: CPU number to execute work on
1487 * @wq: workqueue to use
1488 * @dwork: work to queue
1489 * @delay: number of jiffies to wait before queueing
1490 *
1491 * Return: %false if @work was already on a queue, %true otherwise. If
1492 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1493 * execution.
1494 */
1495 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1496 struct delayed_work *dwork, unsigned long delay)
1497 {
1498 struct work_struct *work = &dwork->work;
1499 bool ret = false;
1500 unsigned long flags;
1501
1502 /* read the comment in __queue_work() */
1503 local_irq_save(flags);
1504
1505 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1506 __queue_delayed_work(cpu, wq, dwork, delay);
1507 ret = true;
1508 }
1509
1510 local_irq_restore(flags);
1511 return ret;
1512 }
1513 EXPORT_SYMBOL(queue_delayed_work_on);
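
/*
 * Example (illustrative sketch; the my_* names are hypothetical):
 * kick off a poll one second from now on whichever CPU is convenient.
 * queue_delayed_work() is the WORK_CPU_UNBOUND wrapper around the
 * function above:
 *
 *	static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);
 *
 *	queue_delayed_work(system_wq, &my_poll_dwork, HZ);
 */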
1514
1515 /**
1516 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1517 * @cpu: CPU number to execute work on
1518 * @wq: workqueue to use
1519 * @dwork: work to queue
1520 * @delay: number of jiffies to wait before queueing
1521 *
1522 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1523 * modify @dwork's timer so that it expires after @delay. If @delay is
1524 * zero, @work is guaranteed to be scheduled immediately regardless of its
1525 * current state.
1526 *
1527 * Return: %false if @dwork was idle and queued, %true if @dwork was
1528 * pending and its timer was modified.
1529 *
1530 * This function is safe to call from any context including IRQ handler.
1531 * See try_to_grab_pending() for details.
1532 */
1533 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1534 struct delayed_work *dwork, unsigned long delay)
1535 {
1536 unsigned long flags;
1537 int ret;
1538
1539 do {
1540 ret = try_to_grab_pending(&dwork->work, true, &flags);
1541 } while (unlikely(ret == -EAGAIN));
1542
1543 if (likely(ret >= 0)) {
1544 __queue_delayed_work(cpu, wq, dwork, delay);
1545 local_irq_restore(flags);
1546 }
1547
1548 /* -ENOENT from try_to_grab_pending() becomes %true */
1549 return ret;
1550 }
1551 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
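
/*
 * Example (illustrative sketch; my_dwork is hypothetical): debouncing
 * a burst of events so the handler runs once, 100ms after the last
 * one. Safe from IRQ context as noted above:
 *
 *	mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * Each call pushes the timer back, so the work runs only after the
 * burst has been quiet for the full delay.
 */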
1552
1553 /**
1554 * worker_enter_idle - enter idle state
1555 * @worker: worker which is entering idle state
1556 *
1557 * @worker is entering idle state. Update stats and idle timer if
1558 * necessary.
1559 *
1560 * LOCKING:
1561 * spin_lock_irq(pool->lock).
1562 */
1563 static void worker_enter_idle(struct worker *worker)
1564 {
1565 struct worker_pool *pool = worker->pool;
1566
1567 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1568 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1569 (worker->hentry.next || worker->hentry.pprev)))
1570 return;
1571
1572 /* can't use worker_set_flags(), also called from create_worker() */
1573 worker->flags |= WORKER_IDLE;
1574 pool->nr_idle++;
1575 worker->last_active = jiffies;
1576
1577 /* idle_list is LIFO */
1578 list_add(&worker->entry, &pool->idle_list);
1579
1580 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1581 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1582
1583 /*
1584 * Sanity check nr_running. Because wq_unbind_fn() releases
1585 * pool->lock between setting %WORKER_UNBOUND and zapping
1586 * nr_running, the warning may trigger spuriously. Check iff
1587 * unbind is not in progress.
1588 */
1589 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1590 pool->nr_workers == pool->nr_idle &&
1591 atomic_read(&pool->nr_running));
1592 }
1593
1594 /**
1595 * worker_leave_idle - leave idle state
1596 * @worker: worker which is leaving idle state
1597 *
1598 * @worker is leaving idle state. Update stats.
1599 *
1600 * LOCKING:
1601 * spin_lock_irq(pool->lock).
1602 */
1603 static void worker_leave_idle(struct worker *worker)
1604 {
1605 struct worker_pool *pool = worker->pool;
1606
1607 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1608 return;
1609 worker_clr_flags(worker, WORKER_IDLE);
1610 pool->nr_idle--;
1611 list_del_init(&worker->entry);
1612 }
1613
1614 static struct worker *alloc_worker(int node)
1615 {
1616 struct worker *worker;
1617
1618 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1619 if (worker) {
1620 INIT_LIST_HEAD(&worker->entry);
1621 INIT_LIST_HEAD(&worker->scheduled);
1622 INIT_LIST_HEAD(&worker->node);
1623 /* on creation a worker is in !idle && prep state */
1624 worker->flags = WORKER_PREP;
1625 }
1626 return worker;
1627 }
1628
1629 /**
1630 * worker_attach_to_pool() - attach a worker to a pool
1631 * @worker: worker to be attached
1632 * @pool: the target pool
1633 *
1634 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1635 * cpu-binding of @worker are kept coordinated with the pool across
1636 * cpu-[un]hotplugs.
1637 */
1638 static void worker_attach_to_pool(struct worker *worker,
1639 struct worker_pool *pool)
1640 {
1641 mutex_lock(&pool->attach_mutex);
1642
1643 /*
1644 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1645 * online CPUs. It'll be re-applied when any of the CPUs come up.
1646 */
1647 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1648
1649 /*
1650 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1651 * stable across this function. See the comments above the
1652 * flag definition for details.
1653 */
1654 if (pool->flags & POOL_DISASSOCIATED)
1655 worker->flags |= WORKER_UNBOUND;
1656
1657 list_add_tail(&worker->node, &pool->workers);
1658
1659 mutex_unlock(&pool->attach_mutex);
1660 }
1661
1662 /**
1663 * worker_detach_from_pool() - detach a worker from its pool
1664 * @worker: worker which is attached to its pool
1665 * @pool: the pool @worker is attached to
1666 *
1667 * Undo the attaching which had been done in worker_attach_to_pool(). The
1668 * caller worker shouldn't access the pool after detaching unless it holds
1669 * another reference to the pool.
1670 */
1671 static void worker_detach_from_pool(struct worker *worker,
1672 struct worker_pool *pool)
1673 {
1674 struct completion *detach_completion = NULL;
1675
1676 mutex_lock(&pool->attach_mutex);
1677 list_del(&worker->node);
1678 if (list_empty(&pool->workers))
1679 detach_completion = pool->detach_completion;
1680 mutex_unlock(&pool->attach_mutex);
1681
1682 /* clear leftover flags without pool->lock after it is detached */
1683 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1684
1685 if (detach_completion)
1686 complete(detach_completion);
1687 }
1688
1689 /**
1690 * create_worker - create a new workqueue worker
1691 * @pool: pool the new worker will belong to
1692 *
1693 * Create and start a new worker which is attached to @pool.
1694 *
1695 * CONTEXT:
1696 * Might sleep. Does GFP_KERNEL allocations.
1697 *
1698 * Return:
1699 * Pointer to the newly created worker, or %NULL on failure.
1700 */
1701 static struct worker *create_worker(struct worker_pool *pool)
1702 {
1703 struct worker *worker = NULL;
1704 int id = -1;
1705 char id_buf[16];
1706
1707 /* ID is needed to determine kthread name */
1708 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1709 if (id < 0)
1710 goto fail;
1711
1712 worker = alloc_worker(pool->node);
1713 if (!worker)
1714 goto fail;
1715
1716 worker->pool = pool;
1717 worker->id = id;
1718
1719 if (pool->cpu >= 0)
1720 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1721 pool->attrs->nice < 0 ? "H" : "");
1722 else
1723 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1724
1725 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1726 "kworker/%s", id_buf);
1727 if (IS_ERR(worker->task))
1728 goto fail;
1729
1730 set_user_nice(worker->task, pool->attrs->nice);
1731 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1732
1733 /* successful, attach the worker to the pool */
1734 worker_attach_to_pool(worker, pool);
1735
1736 /* start the newly created worker */
1737 spin_lock_irq(&pool->lock);
1738 worker->pool->nr_workers++;
1739 worker_enter_idle(worker);
1740 wake_up_process(worker->task);
1741 spin_unlock_irq(&pool->lock);
1742
1743 return worker;
1744
1745 fail:
1746 if (id >= 0)
1747 ida_simple_remove(&pool->worker_ida, id);
1748 kfree(worker);
1749 return NULL;
1750 }
1751
1752 /**
1753 * destroy_worker - destroy a workqueue worker
1754 * @worker: worker to be destroyed
1755 *
1756 * Destroy @worker and adjust @pool stats accordingly. The worker should
1757 * be idle.
1758 *
1759 * CONTEXT:
1760 * spin_lock_irq(pool->lock).
1761 */
1762 static void destroy_worker(struct worker *worker)
1763 {
1764 struct worker_pool *pool = worker->pool;
1765
1766 lockdep_assert_held(&pool->lock);
1767
1768 /* sanity check frenzy */
1769 if (WARN_ON(worker->current_work) ||
1770 WARN_ON(!list_empty(&worker->scheduled)) ||
1771 WARN_ON(!(worker->flags & WORKER_IDLE)))
1772 return;
1773
1774 pool->nr_workers--;
1775 pool->nr_idle--;
1776
1777 list_del_init(&worker->entry);
1778 worker->flags |= WORKER_DIE;
1779 wake_up_process(worker->task);
1780 }
1781
1782 static void idle_worker_timeout(unsigned long __pool)
1783 {
1784 struct worker_pool *pool = (void *)__pool;
1785
1786 spin_lock_irq(&pool->lock);
1787
1788 while (too_many_workers(pool)) {
1789 struct worker *worker;
1790 unsigned long expires;
1791
1792 /* idle_list is kept in LIFO order, check the last one */
1793 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1794 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1795
1796 if (time_before(jiffies, expires)) {
1797 mod_timer(&pool->idle_timer, expires);
1798 break;
1799 }
1800
1801 destroy_worker(worker);
1802 }
1803
1804 spin_unlock_irq(&pool->lock);
1805 }
1806
1807 static void send_mayday(struct work_struct *work)
1808 {
1809 struct pool_workqueue *pwq = get_work_pwq(work);
1810 struct workqueue_struct *wq = pwq->wq;
1811
1812 lockdep_assert_held(&wq_mayday_lock);
1813
1814 if (!wq->rescuer)
1815 return;
1816
1817 /* mayday mayday mayday */
1818 if (list_empty(&pwq->mayday_node)) {
1819 /*
1820 * If @pwq is for an unbound wq, its base ref may be put at
1821 * any time due to an attribute change. Pin @pwq until the
1822 * rescuer is done with it.
1823 */
1824 get_pwq(pwq);
1825 list_add_tail(&pwq->mayday_node, &wq->maydays);
1826 wake_up_process(wq->rescuer->task);
1827 }
1828 }
1829
1830 static void pool_mayday_timeout(unsigned long __pool)
1831 {
1832 struct worker_pool *pool = (void *)__pool;
1833 struct work_struct *work;
1834
1835 spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */
1836 spin_lock(&pool->lock);
1837
1838 if (need_to_create_worker(pool)) {
1839 /*
1840 * We've been trying to create a new worker but
1841 * haven't been successful. We might be hitting an
1842 * allocation deadlock. Send distress signals to
1843 * rescuers.
1844 */
1845 list_for_each_entry(work, &pool->worklist, entry)
1846 send_mayday(work);
1847 }
1848
1849 spin_unlock(&pool->lock);
1850 spin_unlock_irq(&wq_mayday_lock);
1851
1852 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1853 }
1854
1855 /**
1856 * maybe_create_worker - create a new worker if necessary
1857 * @pool: pool to create a new worker for
1858 *
1859 * Create a new worker for @pool if necessary. @pool is guaranteed to
1860 * have at least one idle worker on return from this function. If
1861 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1862 * sent to all rescuers with works scheduled on @pool to resolve
1863 * possible allocation deadlock.
1864 *
1865 * On return, need_to_create_worker() is guaranteed to be %false and
1866 * may_start_working() %true.
1867 *
1868 * LOCKING:
1869 * spin_lock_irq(pool->lock) which may be released and regrabbed
1870 * multiple times. Does GFP_KERNEL allocations. Called only from
1871 * manager.
1872 */
1873 static void maybe_create_worker(struct worker_pool *pool)
1874 __releases(&pool->lock)
1875 __acquires(&pool->lock)
1876 {
1877 restart:
1878 spin_unlock_irq(&pool->lock);
1879
1880 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1881 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1882
1883 while (true) {
1884 if (create_worker(pool) || !need_to_create_worker(pool))
1885 break;
1886
1887 schedule_timeout_interruptible(CREATE_COOLDOWN);
1888
1889 if (!need_to_create_worker(pool))
1890 break;
1891 }
1892
1893 del_timer_sync(&pool->mayday_timer);
1894 spin_lock_irq(&pool->lock);
1895 /*
1896 * This is necessary even after a new worker was just successfully
1897 * created as @pool->lock was dropped and the new worker might have
1898 * already become busy.
1899 */
1900 if (need_to_create_worker(pool))
1901 goto restart;
1902 }
1903
1904 /**
1905 * manage_workers - manage worker pool
1906 * @worker: self
1907 *
1908 * Assume the manager role and manage the worker pool @worker belongs
1909 * to. At any given time, there can be only zero or one manager per
1910 * pool. The exclusion is handled automatically by this function.
1911 *
1912 * The caller can safely start processing works on false return. On
1913 * true return, it's guaranteed that need_to_create_worker() is false
1914 * and may_start_working() is true.
1915 *
1916 * CONTEXT:
1917 * spin_lock_irq(pool->lock) which may be released and regrabbed
1918 * multiple times. Does GFP_KERNEL allocations.
1919 *
1920 * Return:
1921 * %false if the pool doesn't need management and the caller can safely
1922 * start processing works, %true if management function was performed and
1923 * the conditions that the caller verified before calling the function may
1924 * no longer be true.
1925 */
1926 static bool manage_workers(struct worker *worker)
1927 {
1928 struct worker_pool *pool = worker->pool;
1929
1930 /*
1931 * Anyone who successfully grabs manager_arb wins the arbitration
1932 * and becomes the manager. mutex_trylock() on pool->manager_arb
1933 * failure while holding pool->lock reliably indicates that someone
1934 * else is managing the pool and the worker which failed trylock
1935 * can proceed to executing work items. This means that anyone
1936 * grabbing manager_arb is responsible for actually performing
1937 * manager duties. If manager_arb is grabbed and released without
1938 * actual management, the pool may stall indefinitely.
1939 */
1940 if (!mutex_trylock(&pool->manager_arb))
1941 return false;
1942
1943 maybe_create_worker(pool);
1944
1945 mutex_unlock(&pool->manager_arb);
1946 return true;
1947 }
1948
1949 /**
1950 * process_one_work - process single work
1951 * @worker: self
1952 * @work: work to process
1953 *
1954 * Process @work. This function contains all the logic necessary to
1955 * process a single work item, including synchronization against and
1956 * interaction with other workers on the same cpu, queueing and
1957 * flushing. As long as the context requirement is met, any worker can
1958 * call this function to process a work item.
1959 *
1960 * CONTEXT:
1961 * spin_lock_irq(pool->lock) which is released and regrabbed.
1962 */
1963 static void process_one_work(struct worker *worker, struct work_struct *work)
1964 __releases(&pool->lock)
1965 __acquires(&pool->lock)
1966 {
1967 struct pool_workqueue *pwq = get_work_pwq(work);
1968 struct worker_pool *pool = worker->pool;
1969 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
1970 int work_color;
1971 struct worker *collision;
1972 #ifdef CONFIG_LOCKDEP
1973 /*
1974 * It is permissible to free the struct work_struct from
1975 * inside the function that is called from it, this we need to
1976 * take into account for lockdep too. To avoid bogus "held
1977 * lock freed" warnings as well as problems when looking into
1978 * work->lockdep_map, make a copy and use that here.
1979 */
1980 struct lockdep_map lockdep_map;
1981
1982 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
1983 #endif
1984 /* ensure we're on the correct CPU */
1985 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1986 raw_smp_processor_id() != pool->cpu);
1987
1988 /*
1989 * A single work shouldn't be executed concurrently by
1990 * multiple workers on a single cpu. Check whether anyone is
1991 * already processing the work. If so, defer the work to the
1992 * currently executing one.
1993 */
1994 collision = find_worker_executing_work(pool, work);
1995 if (unlikely(collision)) {
1996 move_linked_works(work, &collision->scheduled, NULL);
1997 return;
1998 }
1999
2000 /* claim and dequeue */
2001 debug_work_deactivate(work);
2002 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2003 worker->current_work = work;
2004 worker->current_func = work->func;
2005 worker->current_pwq = pwq;
2006 work_color = get_work_color(work);
2007
2008 list_del_init(&work->entry);
2009
2010 /*
2011 * CPU intensive works don't participate in concurrency management.
2012 * They're the scheduler's responsibility. This takes @worker out
2013 * of concurrency management and the next code block will chain
2014 * execution of the pending work items.
2015 */
2016 if (unlikely(cpu_intensive))
2017 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2018
2019 /*
2020 * Wake up another worker if necessary. The condition is always
2021 * false for normal per-cpu workers since nr_running would always
2022 * be >= 1 at this point. This is used to chain execution of the
2023 * pending work items for WORKER_NOT_RUNNING workers such as the
2024 * UNBOUND and CPU_INTENSIVE ones.
2025 */
2026 if (need_more_worker(pool))
2027 wake_up_worker(pool);
2028
2029 /*
2030 * Record the last pool and clear PENDING which should be the last
2031 * update to @work. Also, do this inside @pool->lock so that
2032 * PENDING and queued state changes happen together while IRQ is
2033 * disabled.
2034 */
2035 set_work_pool_and_clear_pending(work, pool->id);
2036
2037 spin_unlock_irq(&pool->lock);
2038
2039 lock_map_acquire_read(&pwq->wq->lockdep_map);
2040 lock_map_acquire(&lockdep_map);
2041 trace_workqueue_execute_start(work);
2042 worker->current_func(work);
2043 /*
2044 * While we must be careful to not use "work" after this, the trace
2045 * point will only record its address.
2046 */
2047 trace_workqueue_execute_end(work);
2048 lock_map_release(&lockdep_map);
2049 lock_map_release(&pwq->wq->lockdep_map);
2050
2051 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2052 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2053 " last function: %pf\n",
2054 current->comm, preempt_count(), task_pid_nr(current),
2055 worker->current_func);
2056 debug_show_held_locks(current);
2057 dump_stack();
2058 }
2059
2060 /*
2061 * The following prevents a kworker from hogging CPU on !PREEMPT
2062 * kernels, where a requeueing work item waiting for something to
2063 * happen could deadlock with stop_machine as such work item could
2064 * indefinitely requeue itself while all other CPUs are trapped in
2065 * stop_machine. At the same time, report a quiescent RCU state so
2066 * the same condition doesn't freeze RCU.
2067 */
2068 cond_resched_rcu_qs();
2069
2070 spin_lock_irq(&pool->lock);
2071
2072 /* clear cpu intensive status */
2073 if (unlikely(cpu_intensive))
2074 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2075
2076 /* we're done with it, release */
2077 hash_del(&worker->hentry);
2078 worker->current_work = NULL;
2079 worker->current_func = NULL;
2080 worker->current_pwq = NULL;
2081 worker->desc_valid = false;
2082 pwq_dec_nr_in_flight(pwq, work_color);
2083 }
2084
2085 /**
2086 * process_scheduled_works - process scheduled works
2087 * @worker: self
2088 *
2089 * Process all scheduled works. Please note that the scheduled list
2090 * may change while processing a work, so this function repeatedly
2091 * fetches a work from the top and executes it.
2092 *
2093 * CONTEXT:
2094 * spin_lock_irq(pool->lock) which may be released and regrabbed
2095 * multiple times.
2096 */
2097 static void process_scheduled_works(struct worker *worker)
2098 {
2099 while (!list_empty(&worker->scheduled)) {
2100 struct work_struct *work = list_first_entry(&worker->scheduled,
2101 struct work_struct, entry);
2102 process_one_work(worker, work);
2103 }
2104 }
2105
2106 /**
2107 * worker_thread - the worker thread function
2108 * @__worker: self
2109 *
2110 * The worker thread function. All workers belong to a worker_pool -
2111 * either a per-cpu one or a dynamic unbound one. These workers process
2112 * all work items regardless of their specific target workqueue. The only
2113 * exception is work items which belong to workqueues with a rescuer,
2114 * which is explained in rescuer_thread().
2115 *
2116 * Return: 0
2117 */
2118 static int worker_thread(void *__worker)
2119 {
2120 struct worker *worker = __worker;
2121 struct worker_pool *pool = worker->pool;
2122
2123 /* tell the scheduler that this is a workqueue worker */
2124 worker->task->flags |= PF_WQ_WORKER;
2125 woke_up:
2126 spin_lock_irq(&pool->lock);
2127
2128 /* am I supposed to die? */
2129 if (unlikely(worker->flags & WORKER_DIE)) {
2130 spin_unlock_irq(&pool->lock);
2131 WARN_ON_ONCE(!list_empty(&worker->entry));
2132 worker->task->flags &= ~PF_WQ_WORKER;
2133
2134 set_task_comm(worker->task, "kworker/dying");
2135 ida_simple_remove(&pool->worker_ida, worker->id);
2136 worker_detach_from_pool(worker, pool);
2137 kfree(worker);
2138 return 0;
2139 }
2140
2141 worker_leave_idle(worker);
2142 recheck:
2143 /* no more worker necessary? */
2144 if (!need_more_worker(pool))
2145 goto sleep;
2146
2147 /* do we need to manage? */
2148 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2149 goto recheck;
2150
2151 /*
2152 * ->scheduled list can only be filled while a worker is
2153 * preparing to process a work or actually processing it.
2154 * Make sure nobody diddled with it while I was sleeping.
2155 */
2156 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2157
2158 /*
2159 * Finish PREP stage. We're guaranteed to have at least one idle
2160 * worker or that someone else has already assumed the manager
2161 * role. This is where @worker starts participating in concurrency
2162 * management if applicable and concurrency management is restored
2163 * after being rebound. See rebind_workers() for details.
2164 */
2165 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2166
2167 do {
2168 struct work_struct *work =
2169 list_first_entry(&pool->worklist,
2170 struct work_struct, entry);
2171
2172 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2173 /* optimization path, not strictly necessary */
2174 process_one_work(worker, work);
2175 if (unlikely(!list_empty(&worker->scheduled)))
2176 process_scheduled_works(worker);
2177 } else {
2178 move_linked_works(work, &worker->scheduled, NULL);
2179 process_scheduled_works(worker);
2180 }
2181 } while (keep_working(pool));
2182
2183 worker_set_flags(worker, WORKER_PREP);
2184 sleep:
2185 /*
2186 * pool->lock is held and there's no work to process and no need to
2187 * manage, sleep. Workers are woken up only while holding
2188 * pool->lock or from local cpu, so setting the current state
2189 * before releasing pool->lock is enough to prevent losing any
2190 * event.
2191 */
2192 worker_enter_idle(worker);
2193 __set_current_state(TASK_INTERRUPTIBLE);
2194 spin_unlock_irq(&pool->lock);
2195 schedule();
2196 goto woke_up;
2197 }
2198
2199 /**
2200 * rescuer_thread - the rescuer thread function
2201 * @__rescuer: self
2202 *
2203 * Workqueue rescuer thread function. There's one rescuer for each
2204 * workqueue which has WQ_MEM_RECLAIM set.
2205 *
2206 * Regular work processing on a pool may block trying to create a new
2207 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2208 * developing into a deadlock if some work items currently on the same
2209 * queue need to be processed to satisfy the GFP_KERNEL allocation. This
2210 * is the problem the rescuer solves.
2211 *
2212 * When such a condition is possible, the pool summons the rescuers of all
2213 * workqueues which have work items queued on the pool and lets them
2214 * process those items so that forward progress can be guaranteed.
2215 *
2216 * This should happen rarely.
2217 *
2218 * Return: 0
2219 */
2220 static int rescuer_thread(void *__rescuer)
2221 {
2222 struct worker *rescuer = __rescuer;
2223 struct workqueue_struct *wq = rescuer->rescue_wq;
2224 struct list_head *scheduled = &rescuer->scheduled;
2225 bool should_stop;
2226
2227 set_user_nice(current, RESCUER_NICE_LEVEL);
2228
2229 /*
2230 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2231 * doesn't participate in concurrency management.
2232 */
2233 rescuer->task->flags |= PF_WQ_WORKER;
2234 repeat:
2235 set_current_state(TASK_INTERRUPTIBLE);
2236
2237 /*
2238 * By the time the rescuer is requested to stop, the workqueue
2239 * shouldn't have any work pending, but @wq->maydays may still have
2240 * pwq(s) queued. This can happen when non-rescuer workers consume
2241 * all the work items before the rescuer gets to them. Go through
2242 * @wq->maydays processing before acting on should_stop so that the
2243 * list is always empty on exit.
2244 */
2245 should_stop = kthread_should_stop();
2246
2247 /* see whether any pwq is asking for help */
2248 spin_lock_irq(&wq_mayday_lock);
2249
2250 while (!list_empty(&wq->maydays)) {
2251 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2252 struct pool_workqueue, mayday_node);
2253 struct worker_pool *pool = pwq->pool;
2254 struct work_struct *work, *n;
2255
2256 __set_current_state(TASK_RUNNING);
2257 list_del_init(&pwq->mayday_node);
2258
2259 spin_unlock_irq(&wq_mayday_lock);
2260
2261 worker_attach_to_pool(rescuer, pool);
2262
2263 spin_lock_irq(&pool->lock);
2264 rescuer->pool = pool;
2265
2266 /*
2267 * Slurp in all works issued via this workqueue and
2268 * process'em.
2269 */
2270 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2271 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2272 if (get_work_pwq(work) == pwq)
2273 move_linked_works(work, scheduled, &n);
2274
2275 process_scheduled_works(rescuer);
2276
2277 /*
2278 * Put the reference grabbed by send_mayday(). @pool won't
2279 * go away while we're still attached to it.
2280 */
2281 put_pwq(pwq);
2282
2283 /*
2284 * Leave this pool. If need_more_worker() is %true, notify a
2285 * regular worker; otherwise, we end up with 0 concurrency
2286 * and stalling the execution.
2287 */
2288 if (need_more_worker(pool))
2289 wake_up_worker(pool);
2290
2291 rescuer->pool = NULL;
2292 spin_unlock_irq(&pool->lock);
2293
2294 worker_detach_from_pool(rescuer, pool);
2295
2296 spin_lock_irq(&wq_mayday_lock);
2297 }
2298
2299 spin_unlock_irq(&wq_mayday_lock);
2300
2301 if (should_stop) {
2302 __set_current_state(TASK_RUNNING);
2303 rescuer->task->flags &= ~PF_WQ_WORKER;
2304 return 0;
2305 }
2306
2307 /* rescuers should never participate in concurrency management */
2308 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2309 schedule();
2310 goto repeat;
2311 }
2312
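/*
 * Sketch (illustrative): a workqueue whose work items may be needed for
 * forward progress during memory reclaim must be allocated with
 * WQ_MEM_RECLAIM so that a rescuer like the above is available.  The
 * "my_io" name is hypothetical.
 *
 *	my_io_wq = alloc_workqueue("my_io", WQ_MEM_RECLAIM, 0);
 */
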
2313 struct wq_barrier {
2314 struct work_struct work;
2315 struct completion done;
2316 };
2317
2318 static void wq_barrier_func(struct work_struct *work)
2319 {
2320 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2321 complete(&barr->done);
2322 }
2323
2324 /**
2325 * insert_wq_barrier - insert a barrier work
2326 * @pwq: pwq to insert barrier into
2327 * @barr: wq_barrier to insert
2328 * @target: target work to attach @barr to
2329 * @worker: worker currently executing @target, NULL if @target is not executing
2330 *
2331 * @barr is linked to @target such that @barr is completed only after
2332 * @target finishes execution. Please note that the ordering
2333 * guarantee is observed only with respect to @target and on the local
2334 * cpu.
2335 *
2336 * Currently, a queued barrier can't be canceled. This is because
2337 * try_to_grab_pending() can't determine whether the work to be
2338 * grabbed is at the head of the queue and thus can't clear the LINKED
2339 * flag of the previous work, while there must be a valid next work
2340 * after a work with the LINKED flag set.
2341 *
2342 * Note that when @worker is non-NULL, @target may be modified
2343 * underneath us, so we can't reliably determine pwq from @target.
2344 *
2345 * CONTEXT:
2346 * spin_lock_irq(pool->lock).
2347 */
2348 static void insert_wq_barrier(struct pool_workqueue *pwq,
2349 struct wq_barrier *barr,
2350 struct work_struct *target, struct worker *worker)
2351 {
2352 struct list_head *head;
2353 unsigned int linked = 0;
2354
2355 /*
2356 * debugobject calls are safe here even with pool->lock locked
2357 * as we know for sure that this will not trigger any of the
2358 * checks and call back into the fixup functions where we
2359 * might deadlock.
2360 */
2361 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2362 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2363 init_completion(&barr->done);
2364
2365 /*
2366 * If @target is currently being executed, schedule the
2367 * barrier to the worker; otherwise, put it after @target.
2368 */
2369 if (worker)
2370 head = worker->scheduled.next;
2371 else {
2372 unsigned long *bits = work_data_bits(target);
2373
2374 head = target->entry.next;
2375 /* there can already be other linked works, inherit and set */
2376 linked = *bits & WORK_STRUCT_LINKED;
2377 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2378 }
2379
2380 debug_work_activate(&barr->work);
2381 insert_work(pwq, &barr->work, head,
2382 work_color_to_flags(WORK_NO_COLOR) | linked);
2383 }
2384
2385 /**
2386 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2387 * @wq: workqueue being flushed
2388 * @flush_color: new flush color, < 0 for no-op
2389 * @work_color: new work color, < 0 for no-op
2390 *
2391 * Prepare pwqs for workqueue flushing.
2392 *
2393 * If @flush_color is non-negative, flush_color on all pwqs should be
2394 * -1. If no pwq has in-flight commands at the specified color, all
2395 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2396 * has in flight commands, its pwq->flush_color is set to
2397 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2398 * wakeup logic is armed and %true is returned.
2399 *
2400 * The caller should have initialized @wq->first_flusher prior to
2401 * calling this function with non-negative @flush_color. If
2402 * @flush_color is negative, no flush color update is done and %false
2403 * is returned.
2404 *
2405 * If @work_color is non-negative, all pwqs should have the same
2406 * work_color which is previous to @work_color and all will be
2407 * advanced to @work_color.
2408 *
2409 * CONTEXT:
2410 * mutex_lock(wq->mutex).
2411 *
2412 * Return:
2413 * %true if @flush_color >= 0 and there's something to flush. %false
2414 * otherwise.
2415 */
2416 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2417 int flush_color, int work_color)
2418 {
2419 bool wait = false;
2420 struct pool_workqueue *pwq;
2421
2422 if (flush_color >= 0) {
2423 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2424 atomic_set(&wq->nr_pwqs_to_flush, 1);
2425 }
2426
2427 for_each_pwq(pwq, wq) {
2428 struct worker_pool *pool = pwq->pool;
2429
2430 spin_lock_irq(&pool->lock);
2431
2432 if (flush_color >= 0) {
2433 WARN_ON_ONCE(pwq->flush_color != -1);
2434
2435 if (pwq->nr_in_flight[flush_color]) {
2436 pwq->flush_color = flush_color;
2437 atomic_inc(&wq->nr_pwqs_to_flush);
2438 wait = true;
2439 }
2440 }
2441
2442 if (work_color >= 0) {
2443 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2444 pwq->work_color = work_color;
2445 }
2446
2447 spin_unlock_irq(&pool->lock);
2448 }
2449
2450 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2451 complete(&wq->first_flusher->done);
2452
2453 return wait;
2454 }
2455
2456 /**
2457 * flush_workqueue - ensure that any scheduled work has run to completion.
2458 * @wq: workqueue to flush
2459 *
2460 * This function sleeps until all work items which were queued on entry
2461 * have finished execution, but it is not livelocked by new incoming ones.
2462 */
2463 void flush_workqueue(struct workqueue_struct *wq)
2464 {
2465 struct wq_flusher this_flusher = {
2466 .list = LIST_HEAD_INIT(this_flusher.list),
2467 .flush_color = -1,
2468 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2469 };
2470 int next_color;
2471
2472 lock_map_acquire(&wq->lockdep_map);
2473 lock_map_release(&wq->lockdep_map);
2474
2475 mutex_lock(&wq->mutex);
2476
2477 /*
2478 * Start-to-wait phase
2479 */
2480 next_color = work_next_color(wq->work_color);
2481
2482 if (next_color != wq->flush_color) {
2483 /*
2484 * Color space is not full. The current work_color
2485 * becomes our flush_color and work_color is advanced
2486 * by one.
2487 */
2488 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2489 this_flusher.flush_color = wq->work_color;
2490 wq->work_color = next_color;
2491
2492 if (!wq->first_flusher) {
2493 /* no flush in progress, become the first flusher */
2494 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2495
2496 wq->first_flusher = &this_flusher;
2497
2498 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2499 wq->work_color)) {
2500 /* nothing to flush, done */
2501 wq->flush_color = next_color;
2502 wq->first_flusher = NULL;
2503 goto out_unlock;
2504 }
2505 } else {
2506 /* wait in queue */
2507 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2508 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2509 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2510 }
2511 } else {
2512 /*
2513 * Oops, color space is full, wait on overflow queue.
2514 * The next flush completion will assign us
2515 * flush_color and transfer to flusher_queue.
2516 */
2517 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2518 }
2519
2520 mutex_unlock(&wq->mutex);
2521
2522 wait_for_completion(&this_flusher.done);
2523
2524 /*
2525 * Wake-up-and-cascade phase
2526 *
2527 * First flushers are responsible for cascading flushes and
2528 * handling overflow. Non-first flushers can simply return.
2529 */
2530 if (wq->first_flusher != &this_flusher)
2531 return;
2532
2533 mutex_lock(&wq->mutex);
2534
2535 /* we might have raced, check again with mutex held */
2536 if (wq->first_flusher != &this_flusher)
2537 goto out_unlock;
2538
2539 wq->first_flusher = NULL;
2540
2541 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2542 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2543
2544 while (true) {
2545 struct wq_flusher *next, *tmp;
2546
2547 /* complete all the flushers sharing the current flush color */
2548 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2549 if (next->flush_color != wq->flush_color)
2550 break;
2551 list_del_init(&next->list);
2552 complete(&next->done);
2553 }
2554
2555 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2556 wq->flush_color != work_next_color(wq->work_color));
2557
2558 /* this flush_color is finished, advance by one */
2559 wq->flush_color = work_next_color(wq->flush_color);
2560
2561 /* one color has been freed, handle overflow queue */
2562 if (!list_empty(&wq->flusher_overflow)) {
2563 /*
2564 * Assign the same color to all overflowed
2565 * flushers, advance work_color and append to
2566 * flusher_queue. This is the start-to-wait
2567 * phase for these overflowed flushers.
2568 */
2569 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2570 tmp->flush_color = wq->work_color;
2571
2572 wq->work_color = work_next_color(wq->work_color);
2573
2574 list_splice_tail_init(&wq->flusher_overflow,
2575 &wq->flusher_queue);
2576 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2577 }
2578
2579 if (list_empty(&wq->flusher_queue)) {
2580 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2581 break;
2582 }
2583
2584 /*
2585 * Need to flush more colors. Make the next flusher
2586 * the new first flusher and arm pwqs.
2587 */
2588 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2589 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2590
2591 list_del_init(&next->list);
2592 wq->first_flusher = next;
2593
2594 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2595 break;
2596
2597 /*
2598 * Meh... this color is already done, clear first
2599 * flusher and repeat cascading.
2600 */
2601 wq->first_flusher = NULL;
2602 }
2603
2604 out_unlock:
2605 mutex_unlock(&wq->mutex);
2606 }
2607 EXPORT_SYMBOL_GPL(flush_workqueue);
2608
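/*
 * Usage sketch (illustrative): flushing before freeing state that queued
 * work items may still reference.  Once flush_workqueue() returns, every
 * item that was queued before the call has finished.  my_wq and my_state
 * are hypothetical.
 *
 *	queue_work(my_wq, &my_state->work);
 *	...
 *	flush_workqueue(my_wq);
 *	kfree(my_state);
 */
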
2609 /**
2610 * drain_workqueue - drain a workqueue
2611 * @wq: workqueue to drain
2612 *
2613 * Wait until the workqueue becomes empty. While draining is in progress,
2614 * only chain queueing is allowed. IOW, only currently pending or running
2615 * work items on @wq can queue further work items on it. @wq is flushed
2616 * repeatedly until it becomes empty. The number of flushes is determined
2617 * by the depth of chaining and should be relatively small. Whine if it
2618 * takes too long.
2619 */
2620 void drain_workqueue(struct workqueue_struct *wq)
2621 {
2622 unsigned int flush_cnt = 0;
2623 struct pool_workqueue *pwq;
2624
2625 /*
2626 * __queue_work() needs to test whether there are drainers; it is much
2627 * hotter than drain_workqueue() and already looks at @wq->flags.
2628 * Use __WQ_DRAINING so that queueing doesn't have to check nr_drainers.
2629 */
2630 mutex_lock(&wq->mutex);
2631 if (!wq->nr_drainers++)
2632 wq->flags |= __WQ_DRAINING;
2633 mutex_unlock(&wq->mutex);
2634 reflush:
2635 flush_workqueue(wq);
2636
2637 mutex_lock(&wq->mutex);
2638
2639 for_each_pwq(pwq, wq) {
2640 bool drained;
2641
2642 spin_lock_irq(&pwq->pool->lock);
2643 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2644 spin_unlock_irq(&pwq->pool->lock);
2645
2646 if (drained)
2647 continue;
2648
2649 if (++flush_cnt == 10 ||
2650 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2651 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2652 wq->name, flush_cnt);
2653
2654 mutex_unlock(&wq->mutex);
2655 goto reflush;
2656 }
2657
2658 if (!--wq->nr_drainers)
2659 wq->flags &= ~__WQ_DRAINING;
2660 mutex_unlock(&wq->mutex);
2661 }
2662 EXPORT_SYMBOL_GPL(drain_workqueue);
2663
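/*
 * Usage sketch (illustrative): unlike flush_workqueue(), draining also
 * waits out self-requeueing work items, which makes it suitable right
 * before tearing a workqueue down.  my_wq is hypothetical; note that
 * destroy_workqueue() already performs this drain internally.
 *
 *	drain_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */
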
2664 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2665 {
2666 struct worker *worker = NULL;
2667 struct worker_pool *pool;
2668 struct pool_workqueue *pwq;
2669
2670 might_sleep();
2671
2672 local_irq_disable();
2673 pool = get_work_pool(work);
2674 if (!pool) {
2675 local_irq_enable();
2676 return false;
2677 }
2678
2679 spin_lock(&pool->lock);
2680 /* see the comment in try_to_grab_pending() with the same code */
2681 pwq = get_work_pwq(work);
2682 if (pwq) {
2683 if (unlikely(pwq->pool != pool))
2684 goto already_gone;
2685 } else {
2686 worker = find_worker_executing_work(pool, work);
2687 if (!worker)
2688 goto already_gone;
2689 pwq = worker->current_pwq;
2690 }
2691
2692 insert_wq_barrier(pwq, barr, work, worker);
2693 spin_unlock_irq(&pool->lock);
2694
2695 /*
2696 * If @max_active is 1 or rescuer is in use, flushing another work
2697 * item on the same workqueue may lead to deadlock. Make sure the
2698 * flusher is not running on the same workqueue by verifying write
2699 * access.
2700 */
2701 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2702 lock_map_acquire(&pwq->wq->lockdep_map);
2703 else
2704 lock_map_acquire_read(&pwq->wq->lockdep_map);
2705 lock_map_release(&pwq->wq->lockdep_map);
2706
2707 return true;
2708 already_gone:
2709 spin_unlock_irq(&pool->lock);
2710 return false;
2711 }
2712
2713 /**
2714 * flush_work - wait for a work to finish executing the last queueing instance
2715 * @work: the work to flush
2716 *
2717 * Wait until @work has finished execution. @work is guaranteed to be idle
2718 * on return if it hasn't been requeued since flush started.
2719 *
2720 * Return:
2721 * %true if flush_work() waited for the work to finish execution,
2722 * %false if it was already idle.
2723 */
2724 bool flush_work(struct work_struct *work)
2725 {
2726 struct wq_barrier barr;
2727
2728 lock_map_acquire(&work->lockdep_map);
2729 lock_map_release(&work->lockdep_map);
2730
2731 if (start_flush_work(work, &barr)) {
2732 wait_for_completion(&barr.done);
2733 destroy_work_on_stack(&barr.work);
2734 return true;
2735 } else {
2736 return false;
2737 }
2738 }
2739 EXPORT_SYMBOL_GPL(flush_work);
2740
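/*
 * Usage sketch (illustrative): waiting for one specific work item instead
 * of a whole workqueue.  my_work is hypothetical.
 *
 *	schedule_work(&my_work);
 *	...
 *	if (flush_work(&my_work))
 *		pr_debug("my_work was in flight and has now finished\n");
 */
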
2741 struct cwt_wait {
2742 wait_queue_t wait;
2743 struct work_struct *work;
2744 };
2745
2746 static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
2747 {
2748 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2749
2750 if (cwait->work != key)
2751 return 0;
2752 return autoremove_wake_function(wait, mode, sync, key);
2753 }
2754
2755 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2756 {
2757 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2758 unsigned long flags;
2759 int ret;
2760
2761 do {
2762 ret = try_to_grab_pending(work, is_dwork, &flags);
2763 /*
2764 * If someone else is already canceling, wait for it to
2765 * finish. flush_work() doesn't work for PREEMPT_NONE
2766 * because we may get scheduled between @work's completion
2767 * and the other canceling task resuming and clearing
2768 * CANCELING - flush_work() will return false immediately
2769 * as @work is no longer busy, try_to_grab_pending() will
2770 * return -ENOENT as @work is still being canceled and the
2771 * other canceling task won't be able to clear CANCELING as
2772 * we're hogging the CPU.
2773 *
2774 * Let's wait for completion using a waitqueue. As this
2775 * may lead to the thundering herd problem, use a custom
2776 * wake function which matches @work along with exclusive
2777 * wait and wakeup.
2778 */
2779 if (unlikely(ret == -ENOENT)) {
2780 struct cwt_wait cwait;
2781
2782 init_wait(&cwait.wait);
2783 cwait.wait.func = cwt_wakefn;
2784 cwait.work = work;
2785
2786 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2787 TASK_UNINTERRUPTIBLE);
2788 if (work_is_canceling(work))
2789 schedule();
2790 finish_wait(&cancel_waitq, &cwait.wait);
2791 }
2792 } while (unlikely(ret < 0));
2793
2794 /* tell other tasks trying to grab @work to back off */
2795 mark_work_canceling(work);
2796 local_irq_restore(flags);
2797
2798 flush_work(work);
2799 clear_work_data(work);
2800
2801 /*
2802 * Paired with prepare_to_wait() above so that either
2803 * waitqueue_active() is visible here or !work_is_canceling() is
2804 * visible there.
2805 */
2806 smp_mb();
2807 if (waitqueue_active(&cancel_waitq))
2808 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2809
2810 return ret;
2811 }
2812
2813 /**
2814 * cancel_work_sync - cancel a work and wait for it to finish
2815 * @work: the work to cancel
2816 *
2817 * Cancel @work and wait for its execution to finish. This function
2818 * can be used even if the work re-queues itself or migrates to
2819 * another workqueue. On return from this function, @work is
2820 * guaranteed to be not pending or executing on any CPU.
2821 *
2822 * cancel_work_sync(&delayed_work->work) must not be used for
2823 * delayed_work's. Use cancel_delayed_work_sync() instead.
2824 *
2825 * The caller must ensure that the workqueue on which @work was last
2826 * queued can't be destroyed before this function returns.
2827 *
2828 * Return:
2829 * %true if @work was pending, %false otherwise.
2830 */
2831 bool cancel_work_sync(struct work_struct *work)
2832 {
2833 return __cancel_work_timer(work, false);
2834 }
2835 EXPORT_SYMBOL_GPL(cancel_work_sync);
2836
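/*
 * Usage sketch (illustrative): a typical device-removal path.  After
 * cancel_work_sync() returns the item is neither pending nor running, so
 * freeing the containing object is safe.  struct my_dev is hypothetical.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */
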
2837 /**
2838 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2839 * @dwork: the delayed work to flush
2840 *
2841 * Delayed timer is cancelled and the pending work is queued for
2842 * immediate execution. Like flush_work(), this function only
2843 * considers the last queueing instance of @dwork.
2844 *
2845 * Return:
2846 * %true if flush_work() waited for the work to finish execution,
2847 * %false if it was already idle.
2848 */
2849 bool flush_delayed_work(struct delayed_work *dwork)
2850 {
2851 local_irq_disable();
2852 if (del_timer_sync(&dwork->timer))
2853 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2854 local_irq_enable();
2855 return flush_work(&dwork->work);
2856 }
2857 EXPORT_SYMBOL(flush_delayed_work);
2858
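/*
 * Usage sketch (illustrative): forcing a deferred writeback to happen now,
 * e.g. on suspend, instead of waiting for its timer to expire.  The
 * my_save_dwork and my_suspend() names are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		flush_delayed_work(&my_save_dwork);
 *		return 0;
 *	}
 */
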
2859 /**
2860 * cancel_delayed_work - cancel a delayed work
2861 * @dwork: delayed_work to cancel
2862 *
2863 * Kill off a pending delayed_work.
2864 *
2865 * Return: %true if @dwork was pending and canceled; %false if it wasn't
2866 * pending.
2867 *
2868 * Note:
2869 * The work callback function may still be running on return, unless
2870 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
2871 * use cancel_delayed_work_sync() to wait on it.
2872 *
2873 * This function is safe to call from any context including IRQ handler.
2874 */
2875 bool cancel_delayed_work(struct delayed_work *dwork)
2876 {
2877 unsigned long flags;
2878 int ret;
2879
2880 do {
2881 ret = try_to_grab_pending(&dwork->work, true, &flags);
2882 } while (unlikely(ret == -EAGAIN));
2883
2884 if (unlikely(ret < 0))
2885 return false;
2886
2887 set_work_pool_and_clear_pending(&dwork->work,
2888 get_work_pool_id(&dwork->work));
2889 local_irq_restore(flags);
2890 return ret;
2891 }
2892 EXPORT_SYMBOL(cancel_delayed_work);
2893
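/*
 * Usage sketch (illustrative): stopping a self-rearming poll timer from a
 * context that can't sleep.  The callback may still be running when this
 * returns; teardown paths should use cancel_delayed_work_sync() instead.
 * my_poll_dwork is hypothetical.
 *
 *	cancel_delayed_work(&my_poll_dwork);
 */
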
2894 /**
2895 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2896 * @dwork: the delayed work cancel
2897 *
2898 * This is cancel_work_sync() for delayed works.
2899 *
2900 * Return:
2901 * %true if @dwork was pending, %false otherwise.
2902 */
2903 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2904 {
2905 return __cancel_work_timer(&dwork->work, true);
2906 }
2907 EXPORT_SYMBOL(cancel_delayed_work_sync);
2908
2909 /**
2910 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2911 * @func: the function to call
2912 *
2913 * schedule_on_each_cpu() executes @func on each online CPU using the
2914 * system workqueue and blocks until all CPUs have completed.
2915 * schedule_on_each_cpu() is very slow.
2916 *
2917 * Return:
2918 * 0 on success, -errno on failure.
2919 */
2920 int schedule_on_each_cpu(work_func_t func)
2921 {
2922 int cpu;
2923 struct work_struct __percpu *works;
2924
2925 works = alloc_percpu(struct work_struct);
2926 if (!works)
2927 return -ENOMEM;
2928
2929 get_online_cpus();
2930
2931 for_each_online_cpu(cpu) {
2932 struct work_struct *work = per_cpu_ptr(works, cpu);
2933
2934 INIT_WORK(work, func);
2935 schedule_work_on(cpu, work);
2936 }
2937
2938 for_each_online_cpu(cpu)
2939 flush_work(per_cpu_ptr(works, cpu));
2940
2941 put_online_cpus();
2942 free_percpu(works);
2943 return 0;
2944 }
2945
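/*
 * Usage sketch (illustrative): running a function on every online CPU and
 * waiting for all of them, e.g. to drain per-cpu state.  The
 * my_drain_cpu_cache() name is hypothetical.
 *
 *	static void my_drain_cpu_cache(struct work_struct *unused)
 *	{
 *		pr_info("draining on cpu %d\n", raw_smp_processor_id());
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_cpu_cache);
 */
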
2946 /**
2947 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2948 *
2949 * Forces execution of the kernel-global workqueue and blocks until its
2950 * completion.
2951 *
2952 * Think twice before calling this function! It's very easy to get into
2953 * trouble if you don't take great care. Either of the following situations
2954 * will lead to deadlock:
2955 *
2956 * One of the work items currently on the workqueue needs to acquire
2957 * a lock held by your code or its caller.
2958 *
2959 * Your code is running in the context of a work routine.
2960 *
2961 * They will be detected by lockdep when they occur, but the first might not
2962 * occur very often. It depends on what work items are on the workqueue and
2963 * what locks they need, which you have no control over.
2964 *
2965 * In most situations flushing the entire workqueue is overkill; you merely
2966 * need to know that a particular work item isn't queued and isn't running.
2967 * In such cases you should use cancel_delayed_work_sync() or
2968 * cancel_work_sync() instead.
2969 */
2970 void flush_scheduled_work(void)
2971 {
2972 flush_workqueue(system_wq);
2973 }
2974 EXPORT_SYMBOL(flush_scheduled_work);
2975
2976 /**
2977 * execute_in_process_context - reliably execute the routine with user context
2978 * @fn: the function to execute
2979 * @ew: guaranteed storage for the execute work structure (must
2980 * be available when the work executes)
2981 *
2982 * Executes the function immediately if process context is available,
2983 * otherwise schedules the function for delayed execution.
2984 *
2985 * Return: 0 - function was executed
2986 * 1 - function was scheduled for execution
2987 */
2988 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2989 {
2990 if (!in_interrupt()) {
2991 fn(&ew->work);
2992 return 0;
2993 }
2994
2995 INIT_WORK(&ew->work, fn);
2996 schedule_work(&ew->work);
2997
2998 return 1;
2999 }
3000 EXPORT_SYMBOL_GPL(execute_in_process_context);
3001
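/*
 * Usage sketch (illustrative): a release callback which may be invoked
 * from interrupt context can defer its sleeping part through the
 * caller-provided execute_work storage.  struct my_obj is hypothetical.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_free(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_obj, ew.work));
 *	}
 *
 *	execute_in_process_context(my_obj_free, &obj->ew);
 */
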
3002 #ifdef CONFIG_SYSFS
3003 /*
3004 * Workqueues with the WQ_SYSFS flag set are visible to userland via
3005 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
3006 * following attributes.
3007 *
3008 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
3009 * max_active RW int : maximum number of in-flight work items
3010 *
3011 * Unbound workqueues have the following extra attributes.
3012 *
3013 * id RO int : the associated pool ID
3014 * nice RW int : nice value of the workers
3015 * cpumask RW mask : bitmask of allowed CPUs for the workers
3016 */
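
/*
 * For example (illustrative, names hypothetical), a workqueue created with
 *
 *	wq = alloc_workqueue("my_unbound", WQ_UNBOUND | WQ_SYSFS, 0);
 *
 * appears as /sys/bus/workqueue/devices/my_unbound and, being unbound,
 * exposes the extra attributes above so userland can retune e.g. its
 * cpumask at runtime.
 */
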
3017 struct wq_device {
3018 struct workqueue_struct *wq;
3019 struct device dev;
3020 };
3021
3022 static struct workqueue_struct *dev_to_wq(struct device *dev)
3023 {
3024 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3025
3026 return wq_dev->wq;
3027 }
3028
3029 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
3030 char *buf)
3031 {
3032 struct workqueue_struct *wq = dev_to_wq(dev);
3033
3034 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
3035 }
3036 static DEVICE_ATTR_RO(per_cpu);
3037
3038 static ssize_t max_active_show(struct device *dev,
3039 struct device_attribute *attr, char *buf)
3040 {
3041 struct workqueue_struct *wq = dev_to_wq(dev);
3042
3043 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
3044 }
3045
3046 static ssize_t max_active_store(struct device *dev,
3047 struct device_attribute *attr, const char *buf,
3048 size_t count)
3049 {
3050 struct workqueue_struct *wq = dev_to_wq(dev);
3051 int val;
3052
3053 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
3054 return -EINVAL;
3055
3056 workqueue_set_max_active(wq, val);
3057 return count;
3058 }
3059 static DEVICE_ATTR_RW(max_active);
3060
3061 static struct attribute *wq_sysfs_attrs[] = {
3062 &dev_attr_per_cpu.attr,
3063 &dev_attr_max_active.attr,
3064 NULL,
3065 };
3066 ATTRIBUTE_GROUPS(wq_sysfs);
3067
3068 static ssize_t wq_pool_ids_show(struct device *dev,
3069 struct device_attribute *attr, char *buf)
3070 {
3071 struct workqueue_struct *wq = dev_to_wq(dev);
3072 const char *delim = "";
3073 int node, written = 0;
3074
3075 rcu_read_lock_sched();
3076 for_each_node(node) {
3077 written += scnprintf(buf + written, PAGE_SIZE - written,
3078 "%s%d:%d", delim, node,
3079 unbound_pwq_by_node(wq, node)->pool->id);
3080 delim = " ";
3081 }
3082 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3083 rcu_read_unlock_sched();
3084
3085 return written;
3086 }
3087
3088 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
3089 char *buf)
3090 {
3091 struct workqueue_struct *wq = dev_to_wq(dev);
3092 int written;
3093
3094 mutex_lock(&wq->mutex);
3095 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
3096 mutex_unlock(&wq->mutex);
3097
3098 return written;
3099 }
3100
3101 /* prepare workqueue_attrs for sysfs store operations */
3102 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
3103 {
3104 struct workqueue_attrs *attrs;
3105
3106 attrs = alloc_workqueue_attrs(GFP_KERNEL);
3107 if (!attrs)
3108 return NULL;
3109
3110 mutex_lock(&wq->mutex);
3111 copy_workqueue_attrs(attrs, wq->unbound_attrs);
3112 mutex_unlock(&wq->mutex);
3113 return attrs;
3114 }
3115
3116 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
3117 const char *buf, size_t count)
3118 {
3119 struct workqueue_struct *wq = dev_to_wq(dev);
3120 struct workqueue_attrs *attrs;
3121 int ret;
3122
3123 attrs = wq_sysfs_prep_attrs(wq);
3124 if (!attrs)
3125 return -ENOMEM;
3126
3127 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
3128 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
3129 ret = apply_workqueue_attrs(wq, attrs);
3130 else
3131 ret = -EINVAL;
3132
3133 free_workqueue_attrs(attrs);
3134 return ret ?: count;
3135 }
3136
3137 static ssize_t wq_cpumask_show(struct device *dev,
3138 struct device_attribute *attr, char *buf)
3139 {
3140 struct workqueue_struct *wq = dev_to_wq(dev);
3141 int written;
3142
3143 mutex_lock(&wq->mutex);
3144 written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
3145 mutex_unlock(&wq->mutex);
3146
3147 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3148 return written;
3149 }
3150
3151 static ssize_t wq_cpumask_store(struct device *dev,
3152 struct device_attribute *attr,
3153 const char *buf, size_t count)
3154 {
3155 struct workqueue_struct *wq = dev_to_wq(dev);
3156 struct workqueue_attrs *attrs;
3157 int ret;
3158
3159 attrs = wq_sysfs_prep_attrs(wq);
3160 if (!attrs)
3161 return -ENOMEM;
3162
3163 ret = cpumask_parse(buf, attrs->cpumask);
3164 if (!ret)
3165 ret = apply_workqueue_attrs(wq, attrs);
3166
3167 free_workqueue_attrs(attrs);
3168 return ret ?: count;
3169 }
3170
3171 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
3172 char *buf)
3173 {
3174 struct workqueue_struct *wq = dev_to_wq(dev);
3175 int written;
3176
3177 mutex_lock(&wq->mutex);
3178 written = scnprintf(buf, PAGE_SIZE, "%d\n",
3179 !wq->unbound_attrs->no_numa);
3180 mutex_unlock(&wq->mutex);
3181
3182 return written;
3183 }
3184
3185 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
3186 const char *buf, size_t count)
3187 {
3188 struct workqueue_struct *wq = dev_to_wq(dev);
3189 struct workqueue_attrs *attrs;
3190 int v, ret;
3191
3192 attrs = wq_sysfs_prep_attrs(wq);
3193 if (!attrs)
3194 return -ENOMEM;
3195
3196 ret = -EINVAL;
3197 if (sscanf(buf, "%d", &v) == 1) {
3198 attrs->no_numa = !v;
3199 ret = apply_workqueue_attrs(wq, attrs);
3200 }
3201
3202 free_workqueue_attrs(attrs);
3203 return ret ?: count;
3204 }
3205
3206 static struct device_attribute wq_sysfs_unbound_attrs[] = {
3207 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
3208 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
3209 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
3210 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
3211 __ATTR_NULL,
3212 };
3213
3214 static struct bus_type wq_subsys = {
3215 .name = "workqueue",
3216 .dev_groups = wq_sysfs_groups,
3217 };
3218
3219 static int __init wq_sysfs_init(void)
3220 {
3221 return subsys_virtual_register(&wq_subsys, NULL);
3222 }
3223 core_initcall(wq_sysfs_init);
3224
3225 static void wq_device_release(struct device *dev)
3226 {
3227 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3228
3229 kfree(wq_dev);
3230 }
3231
3232 /**
3233 * workqueue_sysfs_register - make a workqueue visible in sysfs
3234 * @wq: the workqueue to register
3235 *
3236 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
3237 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
3238 * which is the preferred method.
3239 *
3240 * A workqueue user should use this function directly iff it wants to
3241 * apply workqueue_attrs before making the workqueue visible in sysfs;
3242 * otherwise, apply_workqueue_attrs() may race against userland updating
3243 * the attributes.
3244 *
3245 * Return: 0 on success, -errno on failure.
3246 */
3247 int workqueue_sysfs_register(struct workqueue_struct *wq)
3248 {
3249 struct wq_device *wq_dev;
3250 int ret;
3251
3252 /*
3253 * Adjusting max_active or creating new pwqs by applying
3254 * attributes breaks the ordering guarantee. Disallow exposing ordered
3255 * workqueues.
3256 */
3257 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3258 return -EINVAL;
3259
3260 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
3261 if (!wq_dev)
3262 return -ENOMEM;
3263
3264 wq_dev->wq = wq;
3265 wq_dev->dev.bus = &wq_subsys;
3266 wq_dev->dev.init_name = wq->name;
3267 wq_dev->dev.release = wq_device_release;
3268
3269 /*
3270 * unbound_attrs are created separately. Suppress uevent until
3271 * everything is ready.
3272 */
3273 dev_set_uevent_suppress(&wq_dev->dev, true);
3274
3275 ret = device_register(&wq_dev->dev);
3276 if (ret) {
3277 kfree(wq_dev);
3278 wq->wq_dev = NULL;
3279 return ret;
3280 }
3281
3282 if (wq->flags & WQ_UNBOUND) {
3283 struct device_attribute *attr;
3284
3285 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
3286 ret = device_create_file(&wq_dev->dev, attr);
3287 if (ret) {
3288 device_unregister(&wq_dev->dev);
3289 wq->wq_dev = NULL;
3290 return ret;
3291 }
3292 }
3293 }
3294
3295 dev_set_uevent_suppress(&wq_dev->dev, false);
3296 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3297 return 0;
3298 }
3299
3300 /**
3301 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
3302 * @wq: the workqueue to unregister
3303 *
3304 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
3305 */
3306 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
3307 {
3308 struct wq_device *wq_dev = wq->wq_dev;
3309
3310 if (!wq->wq_dev)
3311 return;
3312
3313 wq->wq_dev = NULL;
3314 device_unregister(&wq_dev->dev);
3315 }
3316 #else /* CONFIG_SYSFS */
3317 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
3318 #endif /* CONFIG_SYSFS */
3319
3320 /**
3321 * free_workqueue_attrs - free a workqueue_attrs
3322 * @attrs: workqueue_attrs to free
3323 *
3324 * Undo alloc_workqueue_attrs().
3325 */
3326 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3327 {
3328 if (attrs) {
3329 free_cpumask_var(attrs->cpumask);
3330 kfree(attrs);
3331 }
3332 }
3333
3334 /**
3335 * alloc_workqueue_attrs - allocate a workqueue_attrs
3336 * @gfp_mask: allocation mask to use
3337 *
3338 * Allocate a new workqueue_attrs, initialize with default settings and
3339 * return it.
3340 *
3341 * Return: The newly allocated workqueue_attrs on success. %NULL on failure.
3342 */
3343 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3344 {
3345 struct workqueue_attrs *attrs;
3346
3347 attrs = kzalloc(sizeof(*attrs), gfp_mask);
3348 if (!attrs)
3349 goto fail;
3350 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3351 goto fail;
3352
3353 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3354 return attrs;
3355 fail:
3356 free_workqueue_attrs(attrs);
3357 return NULL;
3358 }
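/*
 * Illustrative sketch, not part of the original file: the expected
 * alloc/modify/free lifecycle of a workqueue_attrs as seen by an
 * in-kernel user. example_attrs_roundtrip() is an invented name.
 */
#if 0
static int example_attrs_roundtrip(void)
{
	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;			/* slightly above default priority */
	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(0, attrs->cpumask);	/* restrict to CPU 0 */

	/* ... hand @attrs to apply_workqueue_attrs() here ... */

	free_workqueue_attrs(attrs);		/* undo alloc_workqueue_attrs() */
	return 0;
}
#endif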
3359
3360 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3361 const struct workqueue_attrs *from)
3362 {
3363 to->nice = from->nice;
3364 cpumask_copy(to->cpumask, from->cpumask);
3365 /*
3366 * Unlike hash and equality test, this function doesn't ignore
3367 * ->no_numa as it is used for both pool and wq attrs. Instead,
3368 * get_unbound_pool() explicitly clears ->no_numa after copying.
3369 */
3370 to->no_numa = from->no_numa;
3371 }
3372
3373 /* hash value of the content of @attr */
3374 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3375 {
3376 u32 hash = 0;
3377
3378 hash = jhash_1word(attrs->nice, hash);
3379 hash = jhash(cpumask_bits(attrs->cpumask),
3380 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3381 return hash;
3382 }
3383
3384 /* content equality test */
3385 static bool wqattrs_equal(const struct workqueue_attrs *a,
3386 const struct workqueue_attrs *b)
3387 {
3388 if (a->nice != b->nice)
3389 return false;
3390 if (!cpumask_equal(a->cpumask, b->cpumask))
3391 return false;
3392 return true;
3393 }
3394
3395 /**
3396 * init_worker_pool - initialize a newly zalloc'd worker_pool
3397 * @pool: worker_pool to initialize
3398 *
3399 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3400 *
3401 * Return: 0 on success, -errno on failure. Even on failure, all fields
3402 * inside @pool proper are initialized and put_unbound_pool() can be called
3403 * on @pool safely to release it.
3404 */
3405 static int init_worker_pool(struct worker_pool *pool)
3406 {
3407 spin_lock_init(&pool->lock);
3408 pool->id = -1;
3409 pool->cpu = -1;
3410 pool->node = NUMA_NO_NODE;
3411 pool->flags |= POOL_DISASSOCIATED;
3412 INIT_LIST_HEAD(&pool->worklist);
3413 INIT_LIST_HEAD(&pool->idle_list);
3414 hash_init(pool->busy_hash);
3415
3416 init_timer_deferrable(&pool->idle_timer);
3417 pool->idle_timer.function = idle_worker_timeout;
3418 pool->idle_timer.data = (unsigned long)pool;
3419
3420 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3421 (unsigned long)pool);
3422
3423 mutex_init(&pool->manager_arb);
3424 mutex_init(&pool->attach_mutex);
3425 INIT_LIST_HEAD(&pool->workers);
3426
3427 ida_init(&pool->worker_ida);
3428 INIT_HLIST_NODE(&pool->hash_node);
3429 pool->refcnt = 1;
3430
3431 /* shouldn't fail above this point */
3432 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3433 if (!pool->attrs)
3434 return -ENOMEM;
3435 return 0;
3436 }
3437
3438 static void rcu_free_pool(struct rcu_head *rcu)
3439 {
3440 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3441
3442 ida_destroy(&pool->worker_ida);
3443 free_workqueue_attrs(pool->attrs);
3444 kfree(pool);
3445 }
3446
3447 /**
3448 * put_unbound_pool - put a worker_pool
3449 * @pool: worker_pool to put
3450 *
3451 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
3452 * safe manner. get_unbound_pool() calls this function on its failure path
3453 * and this function should be able to release pools which went through,
3454 * successfully or not, init_worker_pool().
3455 *
3456 * Should be called with wq_pool_mutex held.
3457 */
3458 static void put_unbound_pool(struct worker_pool *pool)
3459 {
3460 DECLARE_COMPLETION_ONSTACK(detach_completion);
3461 struct worker *worker;
3462
3463 lockdep_assert_held(&wq_pool_mutex);
3464
3465 if (--pool->refcnt)
3466 return;
3467
3468 /* sanity checks */
3469 if (WARN_ON(!(pool->cpu < 0)) ||
3470 WARN_ON(!list_empty(&pool->worklist)))
3471 return;
3472
3473 /* release id and unhash */
3474 if (pool->id >= 0)
3475 idr_remove(&worker_pool_idr, pool->id);
3476 hash_del(&pool->hash_node);
3477
3478 /*
3479 * Become the manager and destroy all workers. Grabbing
3480 * manager_arb prevents @pool's workers from blocking on
3481 * attach_mutex.
3482 */
3483 mutex_lock(&pool->manager_arb);
3484
3485 spin_lock_irq(&pool->lock);
3486 while ((worker = first_idle_worker(pool)))
3487 destroy_worker(worker);
3488 WARN_ON(pool->nr_workers || pool->nr_idle);
3489 spin_unlock_irq(&pool->lock);
3490
3491 mutex_lock(&pool->attach_mutex);
3492 if (!list_empty(&pool->workers))
3493 pool->detach_completion = &detach_completion;
3494 mutex_unlock(&pool->attach_mutex);
3495
3496 if (pool->detach_completion)
3497 wait_for_completion(pool->detach_completion);
3498
3499 mutex_unlock(&pool->manager_arb);
3500
3501 /* shut down the timers */
3502 del_timer_sync(&pool->idle_timer);
3503 del_timer_sync(&pool->mayday_timer);
3504
3505 /* sched-RCU protected to allow dereferences from get_work_pool() */
3506 call_rcu_sched(&pool->rcu, rcu_free_pool);
3507 }
3508
3509 /**
3510 * get_unbound_pool - get a worker_pool with the specified attributes
3511 * @attrs: the attributes of the worker_pool to get
3512 *
3513 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3514 * reference count and return it. If there already is a matching
3515 * worker_pool, it will be used; otherwise, this function attempts to
3516 * create a new one.
3517 *
3518 * Should be called with wq_pool_mutex held.
3519 *
3520 * Return: On success, a worker_pool with the same attributes as @attrs.
3521 * On failure, %NULL.
3522 */
3523 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3524 {
3525 u32 hash = wqattrs_hash(attrs);
3526 struct worker_pool *pool;
3527 int node;
3528
3529 lockdep_assert_held(&wq_pool_mutex);
3530
3531 /* do we already have a matching pool? */
3532 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3533 if (wqattrs_equal(pool->attrs, attrs)) {
3534 pool->refcnt++;
3535 return pool;
3536 }
3537 }
3538
3539 /* nope, create a new one */
3540 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3541 if (!pool || init_worker_pool(pool) < 0)
3542 goto fail;
3543
3544 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3545 copy_workqueue_attrs(pool->attrs, attrs);
3546
3547 /*
3548 * no_numa isn't a worker_pool attribute, always clear it. See
3549 * 'struct workqueue_attrs' comments for detail.
3550 */
3551 pool->attrs->no_numa = false;
3552
3553 /* if cpumask is contained inside a NUMA node, we belong to that node */
3554 if (wq_numa_enabled) {
3555 for_each_node(node) {
3556 if (cpumask_subset(pool->attrs->cpumask,
3557 wq_numa_possible_cpumask[node])) {
3558 pool->node = node;
3559 break;
3560 }
3561 }
3562 }
3563
3564 if (worker_pool_assign_id(pool) < 0)
3565 goto fail;
3566
3567 /* create and start the initial worker */
3568 if (!create_worker(pool))
3569 goto fail;
3570
3571 /* install */
3572 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3573
3574 return pool;
3575 fail:
3576 if (pool)
3577 put_unbound_pool(pool);
3578 return NULL;
3579 }
3580
3581 static void rcu_free_pwq(struct rcu_head *rcu)
3582 {
3583 kmem_cache_free(pwq_cache,
3584 container_of(rcu, struct pool_workqueue, rcu));
3585 }
3586
3587 /*
3588 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3589 * and needs to be destroyed.
3590 */
3591 static void pwq_unbound_release_workfn(struct work_struct *work)
3592 {
3593 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3594 unbound_release_work);
3595 struct workqueue_struct *wq = pwq->wq;
3596 struct worker_pool *pool = pwq->pool;
3597 bool is_last;
3598
3599 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3600 return;
3601
3602 mutex_lock(&wq->mutex);
3603 list_del_rcu(&pwq->pwqs_node);
3604 is_last = list_empty(&wq->pwqs);
3605 mutex_unlock(&wq->mutex);
3606
3607 mutex_lock(&wq_pool_mutex);
3608 put_unbound_pool(pool);
3609 mutex_unlock(&wq_pool_mutex);
3610
3611 call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3612
3613 /*
3614 * If we're the last pwq going away, @wq is already dead and no one
3615 * is gonna access it anymore. Free it.
3616 */
3617 if (is_last) {
3618 free_workqueue_attrs(wq->unbound_attrs);
3619 kfree(wq);
3620 }
3621 }
3622
3623 /**
3624 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3625 * @pwq: target pool_workqueue
3626 *
3627 * If @pwq isn't freezing, set @pwq->max_active to the associated
3628 * workqueue's saved_max_active and activate delayed work items
3629 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3630 */
3631 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3632 {
3633 struct workqueue_struct *wq = pwq->wq;
3634 bool freezable = wq->flags & WQ_FREEZABLE;
3635
3636 /* for @wq->saved_max_active */
3637 lockdep_assert_held(&wq->mutex);
3638
3639 /* fast exit for non-freezable wqs */
3640 if (!freezable && pwq->max_active == wq->saved_max_active)
3641 return;
3642
3643 spin_lock_irq(&pwq->pool->lock);
3644
3645 /*
3646 * During [un]freezing, the caller is responsible for ensuring that
3647 * this function is called at least once after @workqueue_freezing
3648 * is updated and visible.
3649 */
3650 if (!freezable || !workqueue_freezing) {
3651 pwq->max_active = wq->saved_max_active;
3652
3653 while (!list_empty(&pwq->delayed_works) &&
3654 pwq->nr_active < pwq->max_active)
3655 pwq_activate_first_delayed(pwq);
3656
3657 /*
3658 * Need to kick a worker after thawed or an unbound wq's
3659 * max_active is bumped. It's a slow path. Do it always.
3660 */
3661 wake_up_worker(pwq->pool);
3662 } else {
3663 pwq->max_active = 0;
3664 }
3665
3666 spin_unlock_irq(&pwq->pool->lock);
3667 }
3668
3669 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3670 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3671 struct worker_pool *pool)
3672 {
3673 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3674
3675 memset(pwq, 0, sizeof(*pwq));
3676
3677 pwq->pool = pool;
3678 pwq->wq = wq;
3679 pwq->flush_color = -1;
3680 pwq->refcnt = 1;
3681 INIT_LIST_HEAD(&pwq->delayed_works);
3682 INIT_LIST_HEAD(&pwq->pwqs_node);
3683 INIT_LIST_HEAD(&pwq->mayday_node);
3684 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3685 }
3686
3687 /* sync @pwq with the current state of its associated wq and link it */
3688 static void link_pwq(struct pool_workqueue *pwq)
3689 {
3690 struct workqueue_struct *wq = pwq->wq;
3691
3692 lockdep_assert_held(&wq->mutex);
3693
3694 /* may be called multiple times, ignore if already linked */
3695 if (!list_empty(&pwq->pwqs_node))
3696 return;
3697
3698 /* set the matching work_color */
3699 pwq->work_color = wq->work_color;
3700
3701 /* sync max_active to the current setting */
3702 pwq_adjust_max_active(pwq);
3703
3704 /* link in @pwq */
3705 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3706 }
3707
3708 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3709 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3710 const struct workqueue_attrs *attrs)
3711 {
3712 struct worker_pool *pool;
3713 struct pool_workqueue *pwq;
3714
3715 lockdep_assert_held(&wq_pool_mutex);
3716
3717 pool = get_unbound_pool(attrs);
3718 if (!pool)
3719 return NULL;
3720
3721 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3722 if (!pwq) {
3723 put_unbound_pool(pool);
3724 return NULL;
3725 }
3726
3727 init_pwq(pwq, wq, pool);
3728 return pwq;
3729 }
3730
3731 /* undo alloc_unbound_pwq(), used only in the error path */
3732 static void free_unbound_pwq(struct pool_workqueue *pwq)
3733 {
3734 lockdep_assert_held(&wq_pool_mutex);
3735
3736 if (pwq) {
3737 put_unbound_pool(pwq->pool);
3738 kmem_cache_free(pwq_cache, pwq);
3739 }
3740 }
3741
3742 /**
3743 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
3744 * @attrs: the wq_attrs of interest
3745 * @node: the target NUMA node
3746 * @cpu_going_down: if >= 0, the CPU to consider as offline
3747 * @cpumask: outarg, the resulting cpumask
3748 *
3749 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3750 * @cpu_going_down is >= 0, that cpu is considered offline during
3751 * calculation. The result is stored in @cpumask.
3752 *
3753 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3754 * enabled and @node has online CPUs requested by @attrs, the returned
3755 * cpumask is the intersection of the possible CPUs of @node and
3756 * @attrs->cpumask.
3757 *
3758 * The caller is responsible for ensuring that the cpumask of @node stays
3759 * stable.
3760 *
3761 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3762 * %false if equal.
3763 */
3764 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3765 int cpu_going_down, cpumask_t *cpumask)
3766 {
3767 if (!wq_numa_enabled || attrs->no_numa)
3768 goto use_dfl;
3769
3770 /* does @node have any online CPUs @attrs wants? */
3771 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3772 if (cpu_going_down >= 0)
3773 cpumask_clear_cpu(cpu_going_down, cpumask);
3774
3775 if (cpumask_empty(cpumask))
3776 goto use_dfl;
3777
3778 /* yeap, return possible CPUs in @node that @attrs wants */
3779 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3780 return !cpumask_equal(cpumask, attrs->cpumask);
3781
3782 use_dfl:
3783 cpumask_copy(cpumask, attrs->cpumask);
3784 return false;
3785 }
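/*
 * Worked example, illustrative only: with NUMA enabled and !no_numa,
 * suppose node 1's possible CPUs are 4-7 and @attrs->cpumask is 0-5.
 * As long as one of CPUs 4-5 is online (and isn't @cpu_going_down),
 * @cpumask becomes {4,5} and the function returns %true since that
 * differs from @attrs->cpumask. If CPUs 4-5 were all unavailable,
 * it would fall back to copying {0-5} and return %false.
 */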
3786
3787 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3788 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3789 int node,
3790 struct pool_workqueue *pwq)
3791 {
3792 struct pool_workqueue *old_pwq;
3793
3794 lockdep_assert_held(&wq->mutex);
3795
3796 /* link_pwq() can handle duplicate calls */
3797 link_pwq(pwq);
3798
3799 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3800 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3801 return old_pwq;
3802 }
3803
3804 /**
3805 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3806 * @wq: the target workqueue
3807 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3808 *
3809 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3810 * machines, this function maps a separate pwq to each NUMA node with
3811 * possible CPUs in @attrs->cpumask so that work items are affine to the
3812 * NUMA node they were issued on. Older pwqs are released as in-flight work
3813 * items finish. Note that a work item which repeatedly requeues itself
3814 * back-to-back will stay on its current pwq.
3815 *
3816 * Performs GFP_KERNEL allocations.
3817 *
3818 * Return: 0 on success and -errno on failure.
3819 */
3820 int apply_workqueue_attrs(struct workqueue_struct *wq,
3821 const struct workqueue_attrs *attrs)
3822 {
3823 struct workqueue_attrs *new_attrs, *tmp_attrs;
3824 struct pool_workqueue **pwq_tbl, *dfl_pwq;
3825 int node, ret;
3826
3827 /* only unbound workqueues can change attributes */
3828 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3829 return -EINVAL;
3830
3831 /* creating multiple pwqs breaks ordering guarantee */
3832 if (!list_empty(&wq->pwqs)) {
3833 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3834 return -EINVAL;
3835
3836 wq->flags &= ~__WQ_ORDERED;
3837 }
3838
3839 pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
3840 new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3841 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3842 if (!pwq_tbl || !new_attrs || !tmp_attrs)
3843 goto enomem;
3844
3845 /* make a copy of @attrs and sanitize it */
3846 copy_workqueue_attrs(new_attrs, attrs);
3847 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3848
3849 /*
3850 * We may create multiple pwqs with differing cpumasks. Make a
3851 * copy of @new_attrs which will be modified and used to obtain
3852 * pools.
3853 */
3854 copy_workqueue_attrs(tmp_attrs, new_attrs);
3855
3856 /*
3857 * CPUs should stay stable across pwq creations and installations.
3858 * Pin CPUs, determine the target cpumask for each node and create
3859 * pwqs accordingly.
3860 */
3861 get_online_cpus();
3862
3863 mutex_lock(&wq_pool_mutex);
3864
3865 /*
3866 * If something goes wrong during CPU up/down, we'll fall back to
3867 * the default pwq covering whole @attrs->cpumask. Always create
3868 * it even if we don't use it immediately.
3869 */
3870 dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3871 if (!dfl_pwq)
3872 goto enomem_pwq;
3873
3874 for_each_node(node) {
3875 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
3876 pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3877 if (!pwq_tbl[node])
3878 goto enomem_pwq;
3879 } else {
3880 dfl_pwq->refcnt++;
3881 pwq_tbl[node] = dfl_pwq;
3882 }
3883 }
3884
3885 mutex_unlock(&wq_pool_mutex);
3886
3887 /* all pwqs have been created successfully, let's install'em */
3888 mutex_lock(&wq->mutex);
3889
3890 copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
3891
3892 /* save the previous pwq and install the new one */
3893 for_each_node(node)
3894 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
3895
3896 /* @dfl_pwq might not have been used, ensure it's linked */
3897 link_pwq(dfl_pwq);
3898 swap(wq->dfl_pwq, dfl_pwq);
3899
3900 mutex_unlock(&wq->mutex);
3901
3902 /* put the old pwqs */
3903 for_each_node(node)
3904 put_pwq_unlocked(pwq_tbl[node]);
3905 put_pwq_unlocked(dfl_pwq);
3906
3907 put_online_cpus();
3908 ret = 0;
3909 /* fall through */
3910 out_free:
3911 free_workqueue_attrs(tmp_attrs);
3912 free_workqueue_attrs(new_attrs);
3913 kfree(pwq_tbl);
3914 return ret;
3915
3916 enomem_pwq:
3917 free_unbound_pwq(dfl_pwq);
3918 for_each_node(node)
3919 if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
3920 free_unbound_pwq(pwq_tbl[node]);
3921 mutex_unlock(&wq_pool_mutex);
3922 put_online_cpus();
3923 enomem:
3924 ret = -ENOMEM;
3925 goto out_free;
3926 }
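/*
 * Illustrative sketch, not part of the original file: applying custom
 * attributes to an unbound workqueue. example_restrict_wq() and @mask
 * are invented names; @wq must have been created with WQ_UNBOUND.
 */
#if 0
static int example_restrict_wq(struct workqueue_struct *wq,
			       const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	cpumask_copy(attrs->cpumask, mask);

	ret = apply_workqueue_attrs(wq, attrs);	/* may sleep */
	free_workqueue_attrs(attrs);		/* @attrs is copied, free is safe */
	return ret;
}
#endif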
3927
3928 /**
3929 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3930 * @wq: the target workqueue
3931 * @cpu: the CPU coming up or going down
3932 * @online: whether @cpu is coming up or going down
3933 *
3934 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3935 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
3936 * @wq accordingly.
3937 *
3938 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3939 * falls back to @wq->dfl_pwq which may not be optimal but is always
3940 * correct.
3941 *
3942 * Note that when the last allowed CPU of a NUMA node goes offline for a
3943 * workqueue with a cpumask spanning multiple nodes, the workers which were
3944 * already executing the work items for the workqueue will lose their CPU
3945 * affinity and may execute on any CPU. This is similar to how per-cpu
3946 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
3947 * affinity, it's the user's responsibility to flush the work item from
3948 * CPU_DOWN_PREPARE.
3949 */
3950 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3951 bool online)
3952 {
3953 int node = cpu_to_node(cpu);
3954 int cpu_off = online ? -1 : cpu;
3955 struct pool_workqueue *old_pwq = NULL, *pwq;
3956 struct workqueue_attrs *target_attrs;
3957 cpumask_t *cpumask;
3958
3959 lockdep_assert_held(&wq_pool_mutex);
3960
3961 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
3962 return;
3963
3964 /*
3965 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3966 * Let's use a preallocated one. The following buf is protected by
3967 * CPU hotplug exclusion.
3968 */
3969 target_attrs = wq_update_unbound_numa_attrs_buf;
3970 cpumask = target_attrs->cpumask;
3971
3972 mutex_lock(&wq->mutex);
3973 if (wq->unbound_attrs->no_numa)
3974 goto out_unlock;
3975
3976 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3977 pwq = unbound_pwq_by_node(wq, node);
3978
3979 /*
3980 * Let's determine what needs to be done. If the target cpumask is
3981 * different from wq's, we need to compare it to @pwq's and create
3982 * a new one if they don't match. If the target cpumask equals
3983 * wq's, the default pwq should be used.
3984 */
3985 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
3986 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3987 goto out_unlock;
3988 } else {
3989 goto use_dfl_pwq;
3990 }
3991
3992 mutex_unlock(&wq->mutex);
3993
3994 /* create a new pwq */
3995 pwq = alloc_unbound_pwq(wq, target_attrs);
3996 if (!pwq) {
3997 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3998 wq->name);
3999 mutex_lock(&wq->mutex);
4000 goto use_dfl_pwq;
4001 }
4002
4003 /*
4004 * Install the new pwq. As this function is called only from CPU
4005 * hotplug callbacks and applying a new attrs is wrapped with
4006 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
4007 * in between.
4008 */
4009 mutex_lock(&wq->mutex);
4010 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4011 goto out_unlock;
4012
4013 use_dfl_pwq:
4014 spin_lock_irq(&wq->dfl_pwq->pool->lock);
4015 get_pwq(wq->dfl_pwq);
4016 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4017 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4018 out_unlock:
4019 mutex_unlock(&wq->mutex);
4020 put_pwq_unlocked(old_pwq);
4021 }
4022
4023 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4024 {
4025 bool highpri = wq->flags & WQ_HIGHPRI;
4026 int cpu, ret;
4027
4028 if (!(wq->flags & WQ_UNBOUND)) {
4029 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4030 if (!wq->cpu_pwqs)
4031 return -ENOMEM;
4032
4033 for_each_possible_cpu(cpu) {
4034 struct pool_workqueue *pwq =
4035 per_cpu_ptr(wq->cpu_pwqs, cpu);
4036 struct worker_pool *cpu_pools =
4037 per_cpu(cpu_worker_pools, cpu);
4038
4039 init_pwq(pwq, wq, &cpu_pools[highpri]);
4040
4041 mutex_lock(&wq->mutex);
4042 link_pwq(pwq);
4043 mutex_unlock(&wq->mutex);
4044 }
4045 return 0;
4046 } else if (wq->flags & __WQ_ORDERED) {
4047 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4048 /* there should only be single pwq for ordering guarantee */
4049 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4050 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4051 "ordering guarantee broken for workqueue %s\n", wq->name);
4052 return ret;
4053 } else {
4054 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4055 }
4056 }
4057
4058 static int wq_clamp_max_active(int max_active, unsigned int flags,
4059 const char *name)
4060 {
4061 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4062
4063 if (max_active < 1 || max_active > lim)
4064 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4065 max_active, name, 1, lim);
4066
4067 return clamp_val(max_active, 1, lim);
4068 }
4069
4070 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
4071 unsigned int flags,
4072 int max_active,
4073 struct lock_class_key *key,
4074 const char *lock_name, ...)
4075 {
4076 size_t tbl_size = 0;
4077 va_list args;
4078 struct workqueue_struct *wq;
4079 struct pool_workqueue *pwq;
4080
4081 /*
4082 * Unbound && max_active == 1 used to imply ordered, which is no
4083 * longer the case on NUMA machines due to per-node pools. While
4084 * alloc_ordered_workqueue() is the right way to create an ordered
4085 * workqueue, keep the previous behavior to avoid subtle breakages
4086 * on NUMA.
4087 */
4088 if ((flags & WQ_UNBOUND) && max_active == 1)
4089 flags |= __WQ_ORDERED;
4090
4091 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4092 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4093 flags |= WQ_UNBOUND;
4094
4095 /* allocate wq and format name */
4096 if (flags & WQ_UNBOUND)
4097 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4098
4099 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4100 if (!wq)
4101 return NULL;
4102
4103 if (flags & WQ_UNBOUND) {
4104 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
4105 if (!wq->unbound_attrs)
4106 goto err_free_wq;
4107 }
4108
4109 va_start(args, lock_name);
4110 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4111 va_end(args);
4112
4113 max_active = max_active ?: WQ_DFL_ACTIVE;
4114 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4115
4116 /* init wq */
4117 wq->flags = flags;
4118 wq->saved_max_active = max_active;
4119 mutex_init(&wq->mutex);
4120 atomic_set(&wq->nr_pwqs_to_flush, 0);
4121 INIT_LIST_HEAD(&wq->pwqs);
4122 INIT_LIST_HEAD(&wq->flusher_queue);
4123 INIT_LIST_HEAD(&wq->flusher_overflow);
4124 INIT_LIST_HEAD(&wq->maydays);
4125
4126 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
4127 INIT_LIST_HEAD(&wq->list);
4128
4129 if (alloc_and_link_pwqs(wq) < 0)
4130 goto err_free_wq;
4131
4132 /*
4133 * Workqueues which may be used during memory reclaim should
4134 * have a rescuer to guarantee forward progress.
4135 */
4136 if (flags & WQ_MEM_RECLAIM) {
4137 struct worker *rescuer;
4138
4139 rescuer = alloc_worker(NUMA_NO_NODE);
4140 if (!rescuer)
4141 goto err_destroy;
4142
4143 rescuer->rescue_wq = wq;
4144 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4145 wq->name);
4146 if (IS_ERR(rescuer->task)) {
4147 kfree(rescuer);
4148 goto err_destroy;
4149 }
4150
4151 wq->rescuer = rescuer;
4152 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4153 wake_up_process(rescuer->task);
4154 }
4155
4156 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4157 goto err_destroy;
4158
4159 /*
4160 * wq_pool_mutex protects global freeze state and workqueues list.
4161 * Grab it, adjust max_active and add the new @wq to workqueues
4162 * list.
4163 */
4164 mutex_lock(&wq_pool_mutex);
4165
4166 mutex_lock(&wq->mutex);
4167 for_each_pwq(pwq, wq)
4168 pwq_adjust_max_active(pwq);
4169 mutex_unlock(&wq->mutex);
4170
4171 list_add(&wq->list, &workqueues);
4172
4173 mutex_unlock(&wq_pool_mutex);
4174
4175 return wq;
4176
4177 err_free_wq:
4178 free_workqueue_attrs(wq->unbound_attrs);
4179 kfree(wq);
4180 return NULL;
4181 err_destroy:
4182 destroy_workqueue(wq);
4183 return NULL;
4184 }
4185 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
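/*
 * Illustrative sketch, not part of the original file: typical
 * driver-side use of the alloc_workqueue() wrapper around
 * __alloc_workqueue_key(). example_wq and example_init() are
 * invented names.
 */
#if 0
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread; max_active of 0
	 * selects the default (WQ_DFL_ACTIVE).
	 */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}
#endif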
4186
4187 /**
4188 * destroy_workqueue - safely terminate a workqueue
4189 * @wq: target workqueue
4190 *
4191 * Safely destroy a workqueue. All work currently pending will be done first.
4192 */
4193 void destroy_workqueue(struct workqueue_struct *wq)
4194 {
4195 struct pool_workqueue *pwq;
4196 int node;
4197
4198 /* drain it before proceeding with destruction */
4199 drain_workqueue(wq);
4200
4201 /* sanity checks */
4202 mutex_lock(&wq->mutex);
4203 for_each_pwq(pwq, wq) {
4204 int i;
4205
4206 for (i = 0; i < WORK_NR_COLORS; i++) {
4207 if (WARN_ON(pwq->nr_in_flight[i])) {
4208 mutex_unlock(&wq->mutex);
4209 return;
4210 }
4211 }
4212
4213 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4214 WARN_ON(pwq->nr_active) ||
4215 WARN_ON(!list_empty(&pwq->delayed_works))) {
4216 mutex_unlock(&wq->mutex);
4217 return;
4218 }
4219 }
4220 mutex_unlock(&wq->mutex);
4221
4222 /*
4223 * wq list is used to freeze wq, remove from list after
4224 * flushing is complete in case freeze races us.
4225 */
4226 mutex_lock(&wq_pool_mutex);
4227 list_del_init(&wq->list);
4228 mutex_unlock(&wq_pool_mutex);
4229
4230 workqueue_sysfs_unregister(wq);
4231
4232 if (wq->rescuer) {
4233 kthread_stop(wq->rescuer->task);
4234 kfree(wq->rescuer);
4235 wq->rescuer = NULL;
4236 }
4237
4238 if (!(wq->flags & WQ_UNBOUND)) {
4239 /*
4240 * The base ref is never dropped on per-cpu pwqs. Directly
4241 * free the pwqs and wq.
4242 */
4243 free_percpu(wq->cpu_pwqs);
4244 kfree(wq);
4245 } else {
4246 /*
4247 * We're the sole accessor of @wq at this point. Directly
4248 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4249 * @wq will be freed when the last pwq is released.
4250 */
4251 for_each_node(node) {
4252 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4253 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4254 put_pwq_unlocked(pwq);
4255 }
4256
4257 /*
4258 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4259 * put. Don't access it afterwards.
4260 */
4261 pwq = wq->dfl_pwq;
4262 wq->dfl_pwq = NULL;
4263 put_pwq_unlocked(pwq);
4264 }
4265 }
4266 EXPORT_SYMBOL_GPL(destroy_workqueue);
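/*
 * Illustrative sketch, not part of the original file: tearing down the
 * workqueue from the example above. destroy_workqueue() drains pending
 * work itself, so no separate flush is required on this path.
 */
#if 0
static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* drains, then frees */
}
#endif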
4267
4268 /**
4269 * workqueue_set_max_active - adjust max_active of a workqueue
4270 * @wq: target workqueue
4271 * @max_active: new max_active value.
4272 *
4273 * Set max_active of @wq to @max_active.
4274 *
4275 * CONTEXT:
4276 * Don't call from IRQ context.
4277 */
4278 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4279 {
4280 struct pool_workqueue *pwq;
4281
4282 /* disallow meddling with max_active for ordered workqueues */
4283 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4284 return;
4285
4286 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4287
4288 mutex_lock(&wq->mutex);
4289
4290 wq->flags &= ~__WQ_ORDERED;
4291 wq->saved_max_active = max_active;
4292
4293 for_each_pwq(pwq, wq)
4294 pwq_adjust_max_active(pwq);
4295
4296 mutex_unlock(&wq->mutex);
4297 }
4298 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
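/*
 * Illustrative sketch, not part of the original file: throttling a
 * workqueue to one in-flight work item per pwq at runtime. Note that
 * this does not make the workqueue ordered; use
 * alloc_ordered_workqueue() for ordering guarantees.
 */
#if 0
static void example_throttle(struct workqueue_struct *wq)
{
	workqueue_set_max_active(wq, 1);	/* clamped to [1, lim] */
}
#endif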
4299
4300 /**
4301 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4302 *
4303 * Determine whether %current is a workqueue rescuer. Can be used from
4304 * work functions to determine whether it's being run off the rescuer task.
4305 *
4306 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4307 */
4308 bool current_is_workqueue_rescuer(void)
4309 {
4310 struct worker *worker = current_wq_worker();
4311
4312 return worker && worker->rescue_wq;
4313 }
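/*
 * Illustrative sketch, not part of the original file: a work function
 * skipping optional, allocation-heavy extras when run off the rescuer,
 * which typically indicates the system is under memory pressure.
 */
#if 0
static void example_reclaim_work(struct work_struct *work)
{
	if (current_is_workqueue_rescuer())
		return;	/* keep the rescuer focused on forward progress */

	/* ... optional work which may allocate freely ... */
}
#endif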
4314
4315 /**
4316 * workqueue_congested - test whether a workqueue is congested
4317 * @cpu: CPU in question
4318 * @wq: target workqueue
4319 *
4320 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4321 * no synchronization around this function and the test result is
4322 * unreliable and only useful as advisory hints or for debugging.
4323 *
4324 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4325 * Note that both per-cpu and unbound workqueues may be associated with
4326 * multiple pool_workqueues which have separate congested states. A
4327 * workqueue being congested on one CPU doesn't mean the workqueue is also
4328 * congested on other CPUs / NUMA nodes.
4329 *
4330 * Return:
4331 * %true if congested, %false otherwise.
4332 */
4333 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4334 {
4335 struct pool_workqueue *pwq;
4336 bool ret;
4337
4338 rcu_read_lock_sched();
4339
4340 if (cpu == WORK_CPU_UNBOUND)
4341 cpu = smp_processor_id();
4342
4343 if (!(wq->flags & WQ_UNBOUND))
4344 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4345 else
4346 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4347
4348 ret = !list_empty(&pwq->delayed_works);
4349 rcu_read_unlock_sched();
4350
4351 return ret;
4352 }
4353 EXPORT_SYMBOL_GPL(workqueue_congested);
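/*
 * Illustrative sketch, not part of the original file: using the
 * advisory congestion test to log a hint before queueing. The result
 * may be stale immediately, so correctness must never depend on it.
 */
#if 0
static void example_submit(struct workqueue_struct *wq,
			   struct work_struct *work)
{
	if (workqueue_congested(WORK_CPU_UNBOUND, wq))
		pr_debug("wq congested on local CPU, queueing anyway\n");
	queue_work(wq, work);
}
#endif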
4354
4355 /**
4356 * work_busy - test whether a work is currently pending or running
4357 * @work: the work to be tested
4358 *
4359 * Test whether @work is currently pending or running. There is no
4360 * synchronization around this function and the test result is
4361 * unreliable and only useful as advisory hints or for debugging.
4362 *
4363 * Return:
4364 * OR'd bitmask of WORK_BUSY_* bits.
4365 */
4366 unsigned int work_busy(struct work_struct *work)
4367 {
4368 struct worker_pool *pool;
4369 unsigned long flags;
4370 unsigned int ret = 0;
4371
4372 if (work_pending(work))
4373 ret |= WORK_BUSY_PENDING;
4374
4375 local_irq_save(flags);
4376 pool = get_work_pool(work);
4377 if (pool) {
4378 spin_lock(&pool->lock);
4379 if (find_worker_executing_work(pool, work))
4380 ret |= WORK_BUSY_RUNNING;
4381 spin_unlock(&pool->lock);
4382 }
4383 local_irq_restore(flags);
4384
4385 return ret;
4386 }
4387 EXPORT_SYMBOL_GPL(work_busy);
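/*
 * Illustrative sketch, not part of the original file: advisory use of
 * work_busy() for debugging output. Both bits can go stale by the time
 * they are inspected.
 */
#if 0
static void example_report(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_debug("work %p:%s%s\n", work,
		 busy & WORK_BUSY_PENDING ? " pending" : "",
		 busy & WORK_BUSY_RUNNING ? " running" : "");
}
#endif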
4388
4389 /**
4390 * set_worker_desc - set description for the current work item
4391 * @fmt: printf-style format string
4392 * @...: arguments for the format string
4393 *
4394 * This function can be called by a running work function to describe what
4395 * the work item is about. If the worker task gets dumped, this
4396 * information will be printed out together to help debugging. The
4397 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4398 */
4399 void set_worker_desc(const char *fmt, ...)
4400 {
4401 struct worker *worker = current_wq_worker();
4402 va_list args;
4403
4404 if (worker) {
4405 va_start(args, fmt);
4406 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4407 va_end(args);
4408 worker->desc_valid = true;
4409 }
4410 }
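/*
 * Illustrative sketch, not part of the original file: a work function
 * tagging itself so a later print_worker_info() names the object being
 * handled. struct example_dev and its fields are invented.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);

	set_worker_desc("example_dev %s", dev->name);
	/* ... a task dump from here on shows the description above ... */
}
#endif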
4411
4412 /**
4413 * print_worker_info - print out worker information and description
4414 * @log_lvl: the log level to use when printing
4415 * @task: target task
4416 *
4417 * If @task is a worker and currently executing a work item, print out the
4418 * name of the workqueue being serviced and worker description set with
4419 * set_worker_desc() by the currently executing work item.
4420 *
4421 * This function can be safely called on any task as long as the
4422 * task_struct itself is accessible. While safe, this function isn't
4423 * synchronized and may print out mixups or garbage of limited length.
4424 */
4425 void print_worker_info(const char *log_lvl, struct task_struct *task)
4426 {
4427 work_func_t *fn = NULL;
4428 char name[WQ_NAME_LEN] = { };
4429 char desc[WORKER_DESC_LEN] = { };
4430 struct pool_workqueue *pwq = NULL;
4431 struct workqueue_struct *wq = NULL;
4432 bool desc_valid = false;
4433 struct worker *worker;
4434
4435 if (!(task->flags & PF_WQ_WORKER))
4436 return;
4437
4438 /*
4439 * This function is called without any synchronization and @task
4440 * could be in any state. Be careful with dereferences.
4441 */
4442 worker = probe_kthread_data(task);
4443
4444 /*
4445 * Carefully copy the associated workqueue's workfn and name. Keep
4446 * the original last '\0' in case the original contains garbage.
4447 */
4448 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4449 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4450 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4451 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4452
4453 /* copy worker description */
4454 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4455 if (desc_valid)
4456 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4457
4458 if (fn || name[0] || desc[0]) {
4459 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4460 if (desc[0])
4461 pr_cont(" (%s)", desc);
4462 pr_cont("\n");
4463 }
4464 }
4465
4466 /*
4467 * CPU hotplug.
4468 *
4469 * There are two challenges in supporting CPU hotplug. Firstly, there
4470 * are a lot of assumptions on strong associations among work, pwq and
4471 * pool which make migrating pending and scheduled works very
4472 * difficult to implement without impacting hot paths. Secondly,
4473 * worker pools serve a mix of short, long and very long running works, making
4474 * blocked draining impractical.
4475 *
4476 * This is solved by allowing the pools to be disassociated from the CPU
4477 * running as an unbound one and allowing it to be reattached later if the
4478 * cpu comes back online.
4479 */
4480
4481 static void wq_unbind_fn(struct work_struct *work)
4482 {
4483 int cpu = smp_processor_id();
4484 struct worker_pool *pool;
4485 struct worker *worker;
4486
4487 for_each_cpu_worker_pool(pool, cpu) {
4488 mutex_lock(&pool->attach_mutex);
4489 spin_lock_irq(&pool->lock);
4490
4491 /*
4492 * We've blocked all attach/detach operations. Make all workers
4493 * unbound and set DISASSOCIATED. Before this, all workers
4494 * except for the ones which are still executing works from
4495 * before the last CPU down must be on the cpu. After
4496 * this, they may become diasporas.
4497 */
4498 for_each_pool_worker(worker, pool)
4499 worker->flags |= WORKER_UNBOUND;
4500
4501 pool->flags |= POOL_DISASSOCIATED;
4502
4503 spin_unlock_irq(&pool->lock);
4504 mutex_unlock(&pool->attach_mutex);
4505
4506 /*
4507 * Call schedule() so that we cross rq->lock and thus can
4508 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4509 * This is necessary as scheduler callbacks may be invoked
4510 * from other cpus.
4511 */
4512 schedule();
4513
4514 /*
4515 * Sched callbacks are disabled now. Zap nr_running.
4516 * After this, nr_running stays zero and need_more_worker()
4517 * and keep_working() are always true as long as the
4518 * worklist is not empty. This pool now behaves as an
4519 * unbound (in terms of concurrency management) pool which
4520 * are served by workers tied to the pool.
4521 */
4522 atomic_set(&pool->nr_running, 0);
4523
4524 /*
4525 * With concurrency management just turned off, a busy
4526 * worker blocking could lead to lengthy stalls. Kick off
4527 * unbound chain execution of currently pending work items.
4528 */
4529 spin_lock_irq(&pool->lock);
4530 wake_up_worker(pool);
4531 spin_unlock_irq(&pool->lock);
4532 }
4533 }
4534
4535 /**
4536 * rebind_workers - rebind all workers of a pool to the associated CPU
4537 * @pool: pool of interest
4538 *
4539 * @pool->cpu is coming online. Rebind all workers to the CPU.
4540 */
4541 static void rebind_workers(struct worker_pool *pool)
4542 {
4543 struct worker *worker;
4544
4545 lockdep_assert_held(&pool->attach_mutex);
4546
4547 /*
4548 * Restore CPU affinity of all workers. As all idle workers should
4549 * be on the run-queue of the associated CPU before any local
4550 * wake-ups for concurrency management happen, restore CPU affinity
4551 * of all workers first and then clear UNBOUND. As we're called
4552 * from CPU_ONLINE, the following shouldn't fail.
4553 */
4554 for_each_pool_worker(worker, pool)
4555 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4556 pool->attrs->cpumask) < 0);
4557
4558 spin_lock_irq(&pool->lock);
4559
4560 /*
4561 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4562 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
4563 * being reworked and this can go away in time.
4564 */
4565 if (!(pool->flags & POOL_DISASSOCIATED)) {
4566 spin_unlock_irq(&pool->lock);
4567 return;
4568 }
4569
4570 pool->flags &= ~POOL_DISASSOCIATED;
4571
4572 for_each_pool_worker(worker, pool) {
4573 unsigned int worker_flags = worker->flags;
4574
4575 /*
4576 * A bound idle worker should actually be on the runqueue
4577 * of the associated CPU for local wake-ups targeting it to
4578 * work. Kick all idle workers so that they migrate to the
4579 * associated CPU. Doing this in the same loop as
4580 * replacing UNBOUND with REBOUND is safe as no worker will
4581 * be bound before @pool->lock is released.
4582 */
4583 if (worker_flags & WORKER_IDLE)
4584 wake_up_process(worker->task);
4585
4586 /*
4587 * We want to clear UNBOUND but can't directly call
4588 * worker_clr_flags() or adjust nr_running. Atomically
4589 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4590 * @worker will clear REBOUND using worker_clr_flags() when
4591 * it initiates the next execution cycle thus restoring
4592 * concurrency management. Note that when or whether
4593 * @worker clears REBOUND doesn't affect correctness.
4594 *
4595 * ACCESS_ONCE() is necessary because @worker->flags may be
4596 * tested without holding any lock in
4597 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4598 * fail incorrectly leading to premature concurrency
4599 * management operations.
4600 */
4601 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4602 worker_flags |= WORKER_REBOUND;
4603 worker_flags &= ~WORKER_UNBOUND;
4604 ACCESS_ONCE(worker->flags) = worker_flags;
4605 }
4606
4607 spin_unlock_irq(&pool->lock);
4608 }
4609
4610 /**
4611 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4612 * @pool: unbound pool of interest
4613 * @cpu: the CPU which is coming up
4614 *
4615 * An unbound pool may end up with a cpumask which doesn't have any online
4616 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
4617 * its cpus_allowed. If @cpu is in @pool's cpumask, which didn't have any
4618 * online CPU before, cpus_allowed of all its workers should be restored.
4619 */
4620 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4621 {
4622 static cpumask_t cpumask;
4623 struct worker *worker;
4624
4625 lockdep_assert_held(&pool->attach_mutex);
4626
4627 /* is @cpu allowed for @pool? */
4628 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4629 return;
4630
4631 /* is @cpu the only online CPU? */
4632 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4633 if (cpumask_weight(&cpumask) != 1)
4634 return;
4635
4636 /* as we're called from CPU_ONLINE, the following shouldn't fail */
4637 for_each_pool_worker(worker, pool)
4638 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4639 pool->attrs->cpumask) < 0);
4640 }
4641
4642 /*
4643 * Workqueues should be brought up before normal priority CPU notifiers.
4644 * This will be registered as a high priority CPU notifier.
4645 */
4646 static int workqueue_cpu_up_callback(struct notifier_block *nfb,
4647 unsigned long action,
4648 void *hcpu)
4649 {
4650 int cpu = (unsigned long)hcpu;
4651 struct worker_pool *pool;
4652 struct workqueue_struct *wq;
4653 int pi;
4654
4655 switch (action & ~CPU_TASKS_FROZEN) {
4656 case CPU_UP_PREPARE:
4657 for_each_cpu_worker_pool(pool, cpu) {
4658 if (pool->nr_workers)
4659 continue;
4660 if (!create_worker(pool))
4661 return NOTIFY_BAD;
4662 }
4663 break;
4664
4665 case CPU_DOWN_FAILED:
4666 case CPU_ONLINE:
4667 mutex_lock(&wq_pool_mutex);
4668
4669 for_each_pool(pool, pi) {
4670 mutex_lock(&pool->attach_mutex);
4671
4672 if (pool->cpu == cpu)
4673 rebind_workers(pool);
4674 else if (pool->cpu < 0)
4675 restore_unbound_workers_cpumask(pool, cpu);
4676
4677 mutex_unlock(&pool->attach_mutex);
4678 }
4679
4680 /* update NUMA affinity of unbound workqueues */
4681 list_for_each_entry(wq, &workqueues, list)
4682 wq_update_unbound_numa(wq, cpu, true);
4683
4684 mutex_unlock(&wq_pool_mutex);
4685 break;
4686 }
4687 return NOTIFY_OK;
4688 }
4689
4690 /*
4691 * Workqueues should be brought down after normal priority CPU notifiers.
4692 * This will be registered as a low priority CPU notifier.
4693 */
4694 static int workqueue_cpu_down_callback(struct notifier_block *nfb,
4695 unsigned long action,
4696 void *hcpu)
4697 {
4698 int cpu = (unsigned long)hcpu;
4699 struct work_struct unbind_work;
4700 struct workqueue_struct *wq;
4701
4702 switch (action & ~CPU_TASKS_FROZEN) {
4703 case CPU_DOWN_PREPARE:
4704 /* unbinding per-cpu workers should happen on the local CPU */
4705 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4706 queue_work_on(cpu, system_highpri_wq, &unbind_work);
4707
4708 /* update NUMA affinity of unbound workqueues */
4709 mutex_lock(&wq_pool_mutex);
4710 list_for_each_entry(wq, &workqueues, list)
4711 wq_update_unbound_numa(wq, cpu, false);
4712 mutex_unlock(&wq_pool_mutex);
4713
4714 /* wait for per-cpu unbinding to finish */
4715 flush_work(&unbind_work);
4716 destroy_work_on_stack(&unbind_work);
4717 break;
4718 }
4719 return NOTIFY_OK;
4720 }
4721
4722 #ifdef CONFIG_SMP
4723
4724 struct work_for_cpu {
4725 struct work_struct work;
4726 long (*fn)(void *);
4727 void *arg;
4728 long ret;
4729 };
4730
4731 static void work_for_cpu_fn(struct work_struct *work)
4732 {
4733 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4734
4735 wfc->ret = wfc->fn(wfc->arg);
4736 }
4737
4738 /**
4739 * work_on_cpu - run a function in user context on a particular cpu
4740 * @cpu: the cpu to run on
4741 * @fn: the function to run
4742 * @arg: the function arg
4743 *
4744 * It is up to the caller to ensure that the cpu doesn't go offline.
4745 * The caller must not hold any locks which would prevent @fn from completing.
4746 *
4747 * Return: The value @fn returns.
4748 */
4749 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4750 {
4751 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4752
4753 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4754 schedule_work_on(cpu, &wfc.work);
4755 flush_work(&wfc.work);
4756 destroy_work_on_stack(&wfc.work);
4757 return wfc.ret;
4758 }
4759 EXPORT_SYMBOL_GPL(work_on_cpu);
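/*
 * Illustrative sketch, not part of the original file: running a
 * function on a specific CPU in process context. example_probe_fn()
 * is an invented callback; the caller is responsible for keeping @cpu
 * online, e.g. by wrapping the call in get/put_online_cpus().
 */
#if 0
static long example_probe_fn(void *arg)
{
	/* executes in process context, bound to the requested CPU */
	return raw_smp_processor_id();
}

static long example_query_cpu(int cpu)
{
	return work_on_cpu(cpu, example_probe_fn, NULL);
}
#endif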
4760 #endif /* CONFIG_SMP */
4761
4762 #ifdef CONFIG_FREEZER
4763
4764 /**
4765 * freeze_workqueues_begin - begin freezing workqueues
4766 *
4767 * Start freezing workqueues. After this function returns, all freezable
4768 * workqueues will queue new works to their delayed_works list instead of
4769 * pool->worklist.
4770 *
4771 * CONTEXT:
4772 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4773 */
4774 void freeze_workqueues_begin(void)
4775 {
4776 struct workqueue_struct *wq;
4777 struct pool_workqueue *pwq;
4778
4779 mutex_lock(&wq_pool_mutex);
4780
4781 WARN_ON_ONCE(workqueue_freezing);
4782 workqueue_freezing = true;
4783
4784 list_for_each_entry(wq, &workqueues, list) {
4785 mutex_lock(&wq->mutex);
4786 for_each_pwq(pwq, wq)
4787 pwq_adjust_max_active(pwq);
4788 mutex_unlock(&wq->mutex);
4789 }
4790
4791 mutex_unlock(&wq_pool_mutex);
4792 }
4793
4794 /**
4795 * freeze_workqueues_busy - are freezable workqueues still busy?
4796 *
4797 * Check whether freezing is complete. This function must be called
4798 * between freeze_workqueues_begin() and thaw_workqueues().
4799 *
4800 * CONTEXT:
4801 * Grabs and releases wq_pool_mutex.
4802 *
4803 * Return:
4804 * %true if some freezable workqueues are still busy. %false if freezing
4805 * is complete.
4806 */
4807 bool freeze_workqueues_busy(void)
4808 {
4809 bool busy = false;
4810 struct workqueue_struct *wq;
4811 struct pool_workqueue *pwq;
4812
4813 mutex_lock(&wq_pool_mutex);
4814
4815 WARN_ON_ONCE(!workqueue_freezing);
4816
4817 list_for_each_entry(wq, &workqueues, list) {
4818 if (!(wq->flags & WQ_FREEZABLE))
4819 continue;
4820 /*
4821 * nr_active is monotonically decreasing. It's safe
4822 * to peek without lock.
4823 */
4824 rcu_read_lock_sched();
4825 for_each_pwq(pwq, wq) {
4826 WARN_ON_ONCE(pwq->nr_active < 0);
4827 if (pwq->nr_active) {
4828 busy = true;
4829 rcu_read_unlock_sched();
4830 goto out_unlock;
4831 }
4832 }
4833 rcu_read_unlock_sched();
4834 }
4835 out_unlock:
4836 mutex_unlock(&wq_pool_mutex);
4837 return busy;
4838 }
4839
4840 /**
4841 * thaw_workqueues - thaw workqueues
4842 *
4843 * Thaw workqueues. Normal queueing is restored and all collected
4844 * frozen works are transferred to their respective pool worklists.
4845 *
4846 * CONTEXT:
4847 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4848 */
4849 void thaw_workqueues(void)
4850 {
4851 struct workqueue_struct *wq;
4852 struct pool_workqueue *pwq;
4853
4854 mutex_lock(&wq_pool_mutex);
4855
4856 if (!workqueue_freezing)
4857 goto out_unlock;
4858
4859 workqueue_freezing = false;
4860
4861 /* restore max_active and repopulate worklist */
4862 list_for_each_entry(wq, &workqueues, list) {
4863 mutex_lock(&wq->mutex);
4864 for_each_pwq(pwq, wq)
4865 pwq_adjust_max_active(pwq);
4866 mutex_unlock(&wq->mutex);
4867 }
4868
4869 out_unlock:
4870 mutex_unlock(&wq_pool_mutex);
4871 }
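/*
 * Illustrative sketch, not part of the original file: how a freezer
 * core might drive the three entry points above during suspend. The
 * retry count and sleep interval are invented for illustration.
 */
#if 0
static int example_freeze_wqs(void)
{
	int tries = 100;

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (!--tries) {
			thaw_workqueues();	/* give up, restore queueing */
			return -EBUSY;
		}
		msleep(10);
	}
	return 0;	/* leave frozen; thaw_workqueues() runs on resume */
}
#endif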
4872 #endif /* CONFIG_FREEZER */
4873
4874 static void __init wq_numa_init(void)
4875 {
4876 cpumask_var_t *tbl;
4877 int node, cpu;
4878
4879 if (num_possible_nodes() <= 1)
4880 return;
4881
4882 if (wq_disable_numa) {
4883 pr_info("workqueue: NUMA affinity support disabled\n");
4884 return;
4885 }
4886
4887 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
4888 BUG_ON(!wq_update_unbound_numa_attrs_buf);
4889
4890 /*
4891 * We want masks of possible CPUs of each node which isn't readily
4892 * available. Build one from cpu_to_node() which should have been
4893 * fully initialized by now.
4894 */
4895 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
4896 BUG_ON(!tbl);
4897
4898 for_each_node(node)
4899 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
4900 node_online(node) ? node : NUMA_NO_NODE));
4901
4902 for_each_possible_cpu(cpu) {
4903 node = cpu_to_node(cpu);
4904 if (WARN_ON(node == NUMA_NO_NODE)) {
4905 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
4906 /* happens iff arch is bonkers, let's just proceed */
4907 return;
4908 }
4909 cpumask_set_cpu(cpu, tbl[node]);
4910 }
4911
4912 wq_numa_possible_cpumask = tbl;
4913 wq_numa_enabled = true;
4914 }
4915
4916 static int __init init_workqueues(void)
4917 {
4918 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
4919 int i, cpu;
4920
4921 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
4922
4923 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
4924
4925 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
4926 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
4927
4928 wq_numa_init();
4929
4930 /* initialize CPU pools */
4931 for_each_possible_cpu(cpu) {
4932 struct worker_pool *pool;
4933
4934 i = 0;
4935 for_each_cpu_worker_pool(pool, cpu) {
4936 BUG_ON(init_worker_pool(pool));
4937 pool->cpu = cpu;
4938 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
4939 pool->attrs->nice = std_nice[i++];
4940 pool->node = cpu_to_node(cpu);
4941
4942 /* alloc pool ID */
4943 mutex_lock(&wq_pool_mutex);
4944 BUG_ON(worker_pool_assign_id(pool));
4945 mutex_unlock(&wq_pool_mutex);
4946 }
4947 }
4948
4949 /* create the initial worker */
4950 for_each_online_cpu(cpu) {
4951 struct worker_pool *pool;
4952
4953 for_each_cpu_worker_pool(pool, cpu) {
4954 pool->flags &= ~POOL_DISASSOCIATED;
4955 BUG_ON(!create_worker(pool));
4956 }
4957 }
4958
4959 /* create default unbound and ordered wq attrs */
4960 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
4961 struct workqueue_attrs *attrs;
4962
4963 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
4964 attrs->nice = std_nice[i];
4965 unbound_std_wq_attrs[i] = attrs;
4966
4967 /*
4968 * An ordered wq should have only one pwq as ordering is
4969 * guaranteed by max_active which is enforced by pwqs.
4970 * Turn off NUMA so that dfl_pwq is used for all nodes.
4971 */
4972 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
4973 attrs->nice = std_nice[i];
4974 attrs->no_numa = true;
4975 ordered_wq_attrs[i] = attrs;
4976 }
4977
4978 system_wq = alloc_workqueue("events", 0, 0);
4979 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
4980 system_long_wq = alloc_workqueue("events_long", 0, 0);
4981 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
4982 WQ_UNBOUND_MAX_ACTIVE);
4983 system_freezable_wq = alloc_workqueue("events_freezable",
4984 WQ_FREEZABLE, 0);
4985 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
4986 WQ_POWER_EFFICIENT, 0);
4987 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
4988 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
4989 0);
4990 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
4991 !system_unbound_wq || !system_freezable_wq ||
4992 !system_power_efficient_wq ||
4993 !system_freezable_power_efficient_wq);
4994 return 0;
4995 }
4996 early_initcall(init_workqueues);
4997