1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/workqueue.c - generic async execution with shared worker pool
4 *
5 * Copyright (C) 2002 Ingo Molnar
6 *
7 * Derived from the taskqueue/keventd code by:
8 * David Woodhouse <dwmw2@infradead.org>
9 * Andrew Morton
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 * Theodore Ts'o <tytso@mit.edu>
12 *
13 * Made to use alloc_percpu by Christoph Lameter.
14 *
15 * Copyright (C) 2010 SUSE Linux Products GmbH
16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 *
18 * This is the generic async execution mechanism. Work items are
19 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
21 * normal work items and the other for high priority ones) and some extra
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
24 *
25 * Please read Documentation/core-api/workqueue.rst for details.
26 */
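/*
 * A minimal usage sketch of this mechanism (illustrative only; my_work
 * and my_work_fn are made-up names, not part of this file):
 *
 *	#include <linux/workqueue.h>
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("ran in process context on a shared kworker\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * and then, from any context:
 *
 *	schedule_work(&my_work);	// queue on system_wq
 *	flush_work(&my_work);		// from process context: wait for it
 */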
27
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/nmi.h>
53 #include <linux/kvm_para.h>
54
55 #include "workqueue_internal.h"
56
57 #include <trace/hooks/wqlockup.h>
58 /* events/workqueue.h uses default TRACE_INCLUDE_PATH */
59 #undef TRACE_INCLUDE_PATH
60
61 enum {
62 /*
63 * worker_pool flags
64 *
65 * A bound pool is either associated with or disassociated from its CPU.
66 * While associated (!DISASSOCIATED), all workers are bound to the
67 * CPU and none has %WORKER_UNBOUND set and concurrency management
68 * is in effect.
69 *
70 * While DISASSOCIATED, the cpu may be offline and all workers have
71 * %WORKER_UNBOUND set and concurrency management disabled, and may
72 * be executing on any CPU. The pool behaves as an unbound one.
73 *
74 * Note that DISASSOCIATED should be flipped only while holding
75 * wq_pool_attach_mutex to avoid changing binding state while
76 * worker_attach_to_pool() is in progress.
77 */
78 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
79 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
80
81 /* worker flags */
82 WORKER_DIE = 1 << 1, /* die die die */
83 WORKER_IDLE = 1 << 2, /* is idle */
84 WORKER_PREP = 1 << 3, /* preparing to run works */
85 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
86 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
87 WORKER_REBOUND = 1 << 8, /* worker was rebound */
88
89 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
90 WORKER_UNBOUND | WORKER_REBOUND,
91
92 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
93
94 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
95 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
96
97 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
98 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
99
100 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
101 /* call for help after 10ms
102 (min two ticks) */
103 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
104 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
105
106 /*
107 * Rescue workers are used only on emergencies and shared by
108 * all cpus. Give MIN_NICE.
109 */
110 RESCUER_NICE_LEVEL = MIN_NICE,
111 HIGHPRI_NICE_LEVEL = MIN_NICE,
112
113 WQ_NAME_LEN = 24,
114 };
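/*
 * Worked example for MAYDAY_INITIAL_TIMEOUT above: with HZ=1000,
 * HZ / 100 = 10 ticks (10ms) is used; with HZ=100, HZ / 100 = 1 falls
 * below the two-tick minimum, so the rescuer is summoned after 2 ticks
 * (20ms) instead.
 */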
115
116 /*
117 * Structure fields follow one of the following exclusion rules.
118 *
119 * I: Modifiable by initialization/destruction paths and read-only for
120 * everyone else.
121 *
122 * P: Preemption protected. Disabling preemption is enough; the field
123 * should only be modified and accessed from the local cpu.
124 *
125 * L: pool->lock protected. Access with pool->lock held.
126 *
127 * X: During normal operation, modification requires pool->lock and should
128 * be done only from local cpu. Either disabling preemption on local
129 * cpu or grabbing pool->lock is enough for read access. If
130 * POOL_DISASSOCIATED is set, it's identical to L.
131 *
132 * A: wq_pool_attach_mutex protected.
133 *
134 * PL: wq_pool_mutex protected.
135 *
136 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
137 *
138 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
139 *
140 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
141 * RCU for reads.
142 *
143 * WQ: wq->mutex protected.
144 *
145 * WR: wq->mutex protected for writes. RCU protected for reads.
146 *
147 * MD: wq_mayday_lock protected.
148 */
149
150 /* struct worker is defined in workqueue_internal.h */
151
152 struct worker_pool {
153 raw_spinlock_t lock; /* the pool lock */
154 int cpu; /* I: the associated cpu */
155 int node; /* I: the associated node ID */
156 int id; /* I: pool ID */
157 unsigned int flags; /* X: flags */
158
159 unsigned long watchdog_ts; /* L: watchdog timestamp */
160
161 struct list_head worklist; /* L: list of pending works */
162
163 int nr_workers; /* L: total number of workers */
164 int nr_idle; /* L: currently idle workers */
165
166 struct list_head idle_list; /* X: list of idle workers */
167 struct timer_list idle_timer; /* L: worker idle timeout */
168 struct timer_list mayday_timer; /* L: SOS timer for workers */
169
170 /* a worker is either on busy_hash or idle_list, or the manager */
171 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
172 /* L: hash of busy workers */
173
174 struct worker *manager; /* L: purely informational */
175 struct list_head workers; /* A: attached workers */
176 struct completion *detach_completion; /* all workers detached */
177
178 struct ida worker_ida; /* worker IDs for task name */
179
180 struct workqueue_attrs *attrs; /* I: worker attributes */
181 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
182 int refcnt; /* PL: refcnt for unbound pools */
183
184 /*
185 * The current concurrency level. As it's likely to be accessed
186 * from other CPUs during try_to_wake_up(), put it in a separate
187 * cacheline.
188 */
189 atomic_t nr_running ____cacheline_aligned_in_smp;
190
191 /*
192 * Destruction of pool is RCU protected to allow dereferences
193 * from get_work_pool().
194 */
195 struct rcu_head rcu;
196 } ____cacheline_aligned_in_smp;
197
198 /*
199 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
200 * of work_struct->data are used for flags and the remaining high bits
201 * point to the pwq; thus, pwqs need to be aligned at two's power of the
202 * number of flag bits.
203 */
204 struct pool_workqueue {
205 struct worker_pool *pool; /* I: the associated pool */
206 struct workqueue_struct *wq; /* I: the owning workqueue */
207 int work_color; /* L: current color */
208 int flush_color; /* L: flushing color */
209 int refcnt; /* L: reference count */
210 int nr_in_flight[WORK_NR_COLORS];
211 /* L: nr of in_flight works */
212
213 /*
214 * nr_active management and WORK_STRUCT_INACTIVE:
215 *
216 * When pwq->nr_active >= max_active, new work item is queued to
217 * pwq->inactive_works instead of pool->worklist and marked with
218 * WORK_STRUCT_INACTIVE.
219 *
220 * All work items marked with WORK_STRUCT_INACTIVE do not participate
221 * in pwq->nr_active and all work items in pwq->inactive_works are
222 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
223 * work items are in pwq->inactive_works. Some of them are ready to
224 * run in pool->worklist or worker->scheduled. Those work items are
225 * only struct wq_barrier which is used for flush_work() and should
226 * not participate in pwq->nr_active. A non-barrier work item is
227 * marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
228 */
229 int nr_active; /* L: nr of active works */
230 int max_active; /* L: max active works */
231 struct list_head inactive_works; /* L: inactive works */
232 struct list_head pwqs_node; /* WR: node on wq->pwqs */
233 struct list_head mayday_node; /* MD: node on wq->maydays */
234
235 /*
236 * Release of unbound pwq is punted to system_wq. See put_pwq()
237 * and pwq_unbound_release_workfn() for details. pool_workqueue
238 * itself is also RCU protected so that the first pwq can be
239 * determined without grabbing wq->mutex.
240 */
241 struct work_struct unbound_release_work;
242 struct rcu_head rcu;
243 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
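/*
 * Illustrative sketch of the packing described above: since every
 * pool_workqueue is aligned to 1 << WORK_STRUCT_FLAG_BITS bytes, the low
 * WORK_STRUCT_FLAG_BITS bits of its address are zero and can carry the
 * WORK_STRUCT_* flags while the item is queued (see set_work_pwq() and
 * work_struct_pwq() below):
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PWQ | WORK_STRUCT_PENDING;
 *	pwq  = (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */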
244
245 /*
246 * Structure used to wait for workqueue flush.
247 */
248 struct wq_flusher {
249 struct list_head list; /* WQ: list of flushers */
250 int flush_color; /* WQ: flush color waiting for */
251 struct completion done; /* flush completion */
252 };
253
254 struct wq_device;
255
256 /*
257 * The externally visible workqueue. It relays the issued work items to
258 * the appropriate worker_pool through its pool_workqueues.
259 */
260 struct workqueue_struct {
261 struct list_head pwqs; /* WR: all pwqs of this wq */
262 struct list_head list; /* PR: list of all workqueues */
263
264 struct mutex mutex; /* protects this wq */
265 int work_color; /* WQ: current work color */
266 int flush_color; /* WQ: current flush color */
267 atomic_t nr_pwqs_to_flush; /* flush in progress */
268 struct wq_flusher *first_flusher; /* WQ: first flusher */
269 struct list_head flusher_queue; /* WQ: flush waiters */
270 struct list_head flusher_overflow; /* WQ: flush overflow list */
271
272 struct list_head maydays; /* MD: pwqs requesting rescue */
273 struct worker *rescuer; /* MD: rescue worker */
274
275 int nr_drainers; /* WQ: drain in progress */
276 int saved_max_active; /* WQ: saved pwq max_active */
277
278 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
279 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
280
281 #ifdef CONFIG_SYSFS
282 struct wq_device *wq_dev; /* I: for sysfs interface */
283 #endif
284 #ifdef CONFIG_LOCKDEP
285 char *lock_name;
286 struct lock_class_key key;
287 struct lockdep_map lockdep_map;
288 #endif
289 char name[WQ_NAME_LEN]; /* I: workqueue name */
290
291 /*
292 * Destruction of workqueue_struct is RCU protected to allow walking
293 * the workqueues list without grabbing wq_pool_mutex.
294 * This is used to dump all workqueues from sysrq.
295 */
296 struct rcu_head rcu;
297
298 /* hot fields used during command issue, aligned to cacheline */
299 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
300 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
301 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
302 };
303
304 static struct kmem_cache *pwq_cache;
305
306 static cpumask_var_t *wq_numa_possible_cpumask;
307 /* possible CPUs of each node */
308
309 static bool wq_disable_numa;
310 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
311
312 /* see the comment above the definition of WQ_POWER_EFFICIENT */
313 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
314 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
315
316 static bool wq_online; /* can kworkers be created yet? */
317
318 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
319
320 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
321 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
322
323 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
324 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
325 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
326 /* wait for manager to go away */
327 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
328
329 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
330 static bool workqueue_freezing; /* PL: have wqs started freezing? */
331
332 /* PL: allowable cpus for unbound wqs and work items */
333 static cpumask_var_t wq_unbound_cpumask;
334
335 /* CPU where unbound work was last round robin scheduled from this CPU */
336 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
337
338 /*
339 * Local execution of unbound work items is no longer guaranteed. The
340 * following always forces round-robin CPU selection on unbound work items
341 * to uncover usages which depend on it.
342 */
343 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
344 static bool wq_debug_force_rr_cpu = true;
345 #else
346 static bool wq_debug_force_rr_cpu = false;
347 #endif
348 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
349
350 /* the per-cpu worker pools */
351 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
352
353 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
354
355 /* PL: hash of all unbound pools keyed by pool->attrs */
356 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
357
358 /* I: attributes used when instantiating standard unbound pools on demand */
359 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
360
361 /* I: attributes used when instantiating ordered pools on demand */
362 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
363
364 struct workqueue_struct *system_wq __read_mostly;
365 EXPORT_SYMBOL(system_wq);
366 struct workqueue_struct *system_highpri_wq __read_mostly;
367 EXPORT_SYMBOL_GPL(system_highpri_wq);
368 struct workqueue_struct *system_long_wq __read_mostly;
369 EXPORT_SYMBOL_GPL(system_long_wq);
370 struct workqueue_struct *system_unbound_wq __read_mostly;
371 EXPORT_SYMBOL_GPL(system_unbound_wq);
372 struct workqueue_struct *system_freezable_wq __read_mostly;
373 EXPORT_SYMBOL_GPL(system_freezable_wq);
374 struct workqueue_struct *system_power_efficient_wq __read_mostly;
375 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
376 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
377 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
378
379 static int worker_thread(void *__worker);
380 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
381 static void show_pwq(struct pool_workqueue *pwq);
382 static void show_one_worker_pool(struct worker_pool *pool);
383
384 #define CREATE_TRACE_POINTS
385 #include <trace/events/workqueue.h>
386
387 EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_start);
388 EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_end);
389
390 #define assert_rcu_or_pool_mutex() \
391 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
392 !lockdep_is_held(&wq_pool_mutex), \
393 "RCU or wq_pool_mutex should be held")
394
395 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
396 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
397 !lockdep_is_held(&wq->mutex) && \
398 !lockdep_is_held(&wq_pool_mutex), \
399 "RCU, wq->mutex or wq_pool_mutex should be held")
400
401 #define for_each_cpu_worker_pool(pool, cpu) \
402 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
403 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
404 (pool)++)
405
406 /**
407 * for_each_pool - iterate through all worker_pools in the system
408 * @pool: iteration cursor
409 * @pi: integer used for iteration
410 *
411 * This must be called either with wq_pool_mutex held or RCU read
412 * locked. If the pool needs to be used beyond the locking in effect, the
413 * caller is responsible for guaranteeing that the pool stays online.
414 *
415 * The if/else clause exists only for the lockdep assertion and can be
416 * ignored.
417 */
418 #define for_each_pool(pool, pi) \
419 idr_for_each_entry(&worker_pool_idr, pool, pi) \
420 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
421 else
422
423 /**
424 * for_each_pool_worker - iterate through all workers of a worker_pool
425 * @worker: iteration cursor
426 * @pool: worker_pool to iterate workers of
427 *
428 * This must be called with wq_pool_attach_mutex.
429 *
430 * The if/else clause exists only for the lockdep assertion and can be
431 * ignored.
432 */
433 #define for_each_pool_worker(worker, pool) \
434 list_for_each_entry((worker), &(pool)->workers, node) \
435 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
436 else
437
438 /**
439 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
440 * @pwq: iteration cursor
441 * @wq: the target workqueue
442 *
443 * This must be called either with wq->mutex held or RCU read locked.
444 * If the pwq needs to be used beyond the locking in effect, the caller is
445 * responsible for guaranteeing that the pwq stays online.
446 *
447 * The if/else clause exists only for the lockdep assertion and can be
448 * ignored.
449 */
450 #define for_each_pwq(pwq, wq) \
451 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
452 lockdep_is_held(&(wq->mutex)))
453
454 #ifdef CONFIG_DEBUG_OBJECTS_WORK
455
456 static const struct debug_obj_descr work_debug_descr;
457
458 static void *work_debug_hint(void *addr)
459 {
460 return ((struct work_struct *) addr)->func;
461 }
462
463 static bool work_is_static_object(void *addr)
464 {
465 struct work_struct *work = addr;
466
467 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
468 }
469
470 /*
471 * fixup_init is called when:
472 * - an active object is initialized
473 */
474 static bool work_fixup_init(void *addr, enum debug_obj_state state)
475 {
476 struct work_struct *work = addr;
477
478 switch (state) {
479 case ODEBUG_STATE_ACTIVE:
480 cancel_work_sync(work);
481 debug_object_init(work, &work_debug_descr);
482 return true;
483 default:
484 return false;
485 }
486 }
487
488 /*
489 * fixup_free is called when:
490 * - an active object is freed
491 */
492 static bool work_fixup_free(void *addr, enum debug_obj_state state)
493 {
494 struct work_struct *work = addr;
495
496 switch (state) {
497 case ODEBUG_STATE_ACTIVE:
498 cancel_work_sync(work);
499 debug_object_free(work, &work_debug_descr);
500 return true;
501 default:
502 return false;
503 }
504 }
505
506 static const struct debug_obj_descr work_debug_descr = {
507 .name = "work_struct",
508 .debug_hint = work_debug_hint,
509 .is_static_object = work_is_static_object,
510 .fixup_init = work_fixup_init,
511 .fixup_free = work_fixup_free,
512 };
513
514 static inline void debug_work_activate(struct work_struct *work)
515 {
516 debug_object_activate(work, &work_debug_descr);
517 }
518
519 static inline void debug_work_deactivate(struct work_struct *work)
520 {
521 debug_object_deactivate(work, &work_debug_descr);
522 }
523
524 void __init_work(struct work_struct *work, int onstack)
525 {
526 if (onstack)
527 debug_object_init_on_stack(work, &work_debug_descr);
528 else
529 debug_object_init(work, &work_debug_descr);
530 }
531 EXPORT_SYMBOL_GPL(__init_work);
532
533 void destroy_work_on_stack(struct work_struct *work)
534 {
535 debug_object_free(work, &work_debug_descr);
536 }
537 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
538
539 void destroy_delayed_work_on_stack(struct delayed_work *work)
540 {
541 destroy_timer_on_stack(&work->timer);
542 debug_object_free(&work->work, &work_debug_descr);
543 }
544 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
545
546 #else
547 static inline void debug_work_activate(struct work_struct *work) { }
548 static inline void debug_work_deactivate(struct work_struct *work) { }
549 #endif
550
551 /**
552 * worker_pool_assign_id - allocate ID and assign it to @pool
553 * @pool: the pool pointer of interest
554 *
555 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
556 * successfully, -errno on failure.
557 */
558 static int worker_pool_assign_id(struct worker_pool *pool)
559 {
560 int ret;
561
562 lockdep_assert_held(&wq_pool_mutex);
563
564 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
565 GFP_KERNEL);
566 if (ret >= 0) {
567 pool->id = ret;
568 return 0;
569 }
570 return ret;
571 }
572
573 /**
574 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
575 * @wq: the target workqueue
576 * @node: the node ID
577 *
578 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
579 * read locked.
580 * If the pwq needs to be used beyond the locking in effect, the caller is
581 * responsible for guaranteeing that the pwq stays online.
582 *
583 * Return: The unbound pool_workqueue for @node.
584 */
585 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
586 int node)
587 {
588 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
589
590 /*
591 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
592 * delayed item is pending. The plan is to keep CPU -> NODE
593 * mapping valid and stable across CPU on/offlines. Once that
594 * happens, this workaround can be removed.
595 */
596 if (unlikely(node == NUMA_NO_NODE))
597 return wq->dfl_pwq;
598
599 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
600 }
601
602 static unsigned int work_color_to_flags(int color)
603 {
604 return color << WORK_STRUCT_COLOR_SHIFT;
605 }
606
607 static int get_work_color(unsigned long work_data)
608 {
609 return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
610 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
611 }
612
613 static int work_next_color(int color)
614 {
615 return (color + 1) % WORK_NR_COLORS;
616 }
617
618 /*
619 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
620 * contain the pointer to the queued pwq. Once execution starts, the flag
621 * is cleared and the high bits contain OFFQ flags and pool ID.
622 *
623 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
624 * and clear_work_data() can be used to set the pwq, pool or clear
625 * work->data. These functions should only be called while the work is
626 * owned - ie. while the PENDING bit is set.
627 *
628 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
629 * corresponding to a work. Pool is available once the work has been
630 * queued anywhere after initialization until it is sync canceled. pwq is
631 * available only while the work item is queued.
632 *
633 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
634 * canceled. While being canceled, a work item may have its PENDING set
635 * but stay off timer and worklist for arbitrarily long and nobody should
636 * try to steal the PENDING bit.
637 */
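/*
 * Sketch of the off-queue encoding described above (illustrative; the
 * real helpers follow below): once execution starts, the pwq pointer is
 * replaced by the pool ID plus OFFQ flags, e.g.
 *
 *	data = ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
 *	       WORK_OFFQ_CANCELING;		// while being canceled
 *	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 */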
638 static inline void set_work_data(struct work_struct *work, unsigned long data,
639 unsigned long flags)
640 {
641 WARN_ON_ONCE(!work_pending(work));
642 atomic_long_set(&work->data, data | flags | work_static(work));
643 }
644
645 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
646 unsigned long extra_flags)
647 {
648 set_work_data(work, (unsigned long)pwq,
649 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
650 }
651
652 static void set_work_pool_and_keep_pending(struct work_struct *work,
653 int pool_id)
654 {
655 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
656 WORK_STRUCT_PENDING);
657 }
658
659 static void set_work_pool_and_clear_pending(struct work_struct *work,
660 int pool_id)
661 {
662 /*
663 * The following wmb is paired with the implied mb in
664 * test_and_set_bit(PENDING) and ensures all updates to @work made
665 * here are visible to and precede any updates by the next PENDING
666 * owner.
667 */
668 smp_wmb();
669 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
670 /*
671 * The following mb guarantees that previous clear of a PENDING bit
672 * will not be reordered with any speculative LOADS or STORES from
673 * work->current_func, which is executed afterwards. This possible
674 * reordering can lead to a missed execution on attempt to queue
675 * the same @work. E.g. consider this case:
676 *
677 * CPU#0 CPU#1
678 * ---------------------------- --------------------------------
679 *
680 * 1 STORE event_indicated
681 * 2 queue_work_on() {
682 * 3 test_and_set_bit(PENDING)
683 * 4 } set_..._and_clear_pending() {
684 * 5 set_work_data() # clear bit
685 * 6 smp_mb()
686 * 7 work->current_func() {
687 * 8 LOAD event_indicated
688 * }
689 *
690 * Without an explicit full barrier, the speculative LOAD on line 8 can
691 * be executed before CPU#0 does the STORE on line 1. If that happens,
692 * CPU#0 observes that the PENDING bit is still set and a new execution
693 * of @work is not queued, in the hope that CPU#1 will eventually
694 * finish the queued @work. Meanwhile CPU#1 does not see that
695 * event_indicated is set, because the speculative LOAD was executed
696 * before the actual STORE.
697 */
698 smp_mb();
699 }
700
701 static void clear_work_data(struct work_struct *work)
702 {
703 smp_wmb(); /* see set_work_pool_and_clear_pending() */
704 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
705 }
706
707 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
708 {
709 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
710 }
711
712 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
713 {
714 unsigned long data = atomic_long_read(&work->data);
715
716 if (data & WORK_STRUCT_PWQ)
717 return work_struct_pwq(data);
718 else
719 return NULL;
720 }
721
722 /**
723 * get_work_pool - return the worker_pool a given work was associated with
724 * @work: the work item of interest
725 *
726 * Pools are created and destroyed under wq_pool_mutex, and read access
727 * is allowed under the RCU read lock. As such, this function should be
728 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
729 *
730 * All fields of the returned pool are accessible as long as the above
731 * mentioned locking is in effect. If the returned pool needs to be used
732 * beyond the critical section, the caller is responsible for ensuring the
733 * returned pool is and stays online.
734 *
735 * Return: The worker_pool @work was last associated with. %NULL if none.
736 */
737 static struct worker_pool *get_work_pool(struct work_struct *work)
738 {
739 unsigned long data = atomic_long_read(&work->data);
740 int pool_id;
741
742 assert_rcu_or_pool_mutex();
743
744 if (data & WORK_STRUCT_PWQ)
745 return work_struct_pwq(data)->pool;
746
747 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
748 if (pool_id == WORK_OFFQ_POOL_NONE)
749 return NULL;
750
751 return idr_find(&worker_pool_idr, pool_id);
752 }
753
754 /**
755 * get_work_pool_id - return the worker pool ID a given work is associated with
756 * @work: the work item of interest
757 *
758 * Return: The worker_pool ID @work was last associated with.
759 * %WORK_OFFQ_POOL_NONE if none.
760 */
761 static int get_work_pool_id(struct work_struct *work)
762 {
763 unsigned long data = atomic_long_read(&work->data);
764
765 if (data & WORK_STRUCT_PWQ)
766 return work_struct_pwq(data)->pool->id;
767
768 return data >> WORK_OFFQ_POOL_SHIFT;
769 }
770
771 static void mark_work_canceling(struct work_struct *work)
772 {
773 unsigned long pool_id = get_work_pool_id(work);
774
775 pool_id <<= WORK_OFFQ_POOL_SHIFT;
776 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
777 }
778
779 static bool work_is_canceling(struct work_struct *work)
780 {
781 unsigned long data = atomic_long_read(&work->data);
782
783 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
784 }
785
786 /*
787 * Policy functions. These define the policies on how the global worker
788 * pools are managed. Unless noted otherwise, these functions assume that
789 * they're being called with pool->lock held.
790 */
791
792 static bool __need_more_worker(struct worker_pool *pool)
793 {
794 return !atomic_read(&pool->nr_running);
795 }
796
797 /*
798 * Need to wake up a worker? Called from anything but currently
799 * running workers.
800 *
801 * Note that, because unbound workers never contribute to nr_running, this
802 * function will always return %true for unbound pools as long as the
803 * worklist isn't empty.
804 */
805 static bool need_more_worker(struct worker_pool *pool)
806 {
807 return !list_empty(&pool->worklist) && __need_more_worker(pool);
808 }
809
810 /* Can I start working? Called from busy but !running workers. */
811 static bool may_start_working(struct worker_pool *pool)
812 {
813 return pool->nr_idle;
814 }
815
816 /* Do I need to keep working? Called from currently running workers. */
817 static bool keep_working(struct worker_pool *pool)
818 {
819 return !list_empty(&pool->worklist) &&
820 atomic_read(&pool->nr_running) <= 1;
821 }
822
823 /* Do we need a new worker? Called from manager. */
824 static bool need_to_create_worker(struct worker_pool *pool)
825 {
826 return need_more_worker(pool) && !may_start_working(pool);
827 }
828
829 /* Do we have too many workers and should some go away? */
830 static bool too_many_workers(struct worker_pool *pool)
831 {
832 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
833 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
834 int nr_busy = pool->nr_workers - nr_idle;
835
836 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
837 }
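/*
 * Worked example for the ratio above (MAX_IDLE_WORKERS_RATIO == 4): with
 * 16 busy workers, the pool is considered to have too many idle workers
 * once nr_idle reaches 2 + 16 / 4 = 6, at which point the idle machinery
 * starts trimming the surplus.
 */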
838
839 /*
840 * Wake up functions.
841 */
842
843 /* Return the first idle worker. Safe with preemption disabled */
844 static struct worker *first_idle_worker(struct worker_pool *pool)
845 {
846 if (unlikely(list_empty(&pool->idle_list)))
847 return NULL;
848
849 return list_first_entry(&pool->idle_list, struct worker, entry);
850 }
851
852 /**
853 * wake_up_worker - wake up an idle worker
854 * @pool: worker pool to wake worker from
855 *
856 * Wake up the first idle worker of @pool.
857 *
858 * CONTEXT:
859 * raw_spin_lock_irq(pool->lock).
860 */
861 static void wake_up_worker(struct worker_pool *pool)
862 {
863 struct worker *worker = first_idle_worker(pool);
864
865 if (likely(worker))
866 wake_up_process(worker->task);
867 }
868
869 /**
870 * wq_worker_running - a worker is running again
871 * @task: task waking up
872 *
873 * This function is called when a worker returns from schedule()
874 */
875 void wq_worker_running(struct task_struct *task)
876 {
877 struct worker *worker = kthread_data(task);
878
879 if (!worker->sleeping)
880 return;
881
882 /*
883 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
884 * and the nr_running increment below, we may ruin the nr_running reset
885 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
886 * pool. Protect against such race.
887 */
888 preempt_disable();
889 if (!(worker->flags & WORKER_NOT_RUNNING))
890 atomic_inc(&worker->pool->nr_running);
891 preempt_enable();
892 worker->sleeping = 0;
893 }
894
895 /**
896 * wq_worker_sleeping - a worker is going to sleep
897 * @task: task going to sleep
898 *
899 * This function is called from schedule() when a busy worker is
900 * going to sleep. Preemption needs to be disabled to protect ->sleeping
901 * assignment.
902 */
903 void wq_worker_sleeping(struct task_struct *task)
904 {
905 struct worker *next, *worker = kthread_data(task);
906 struct worker_pool *pool;
907
908 /*
909 * Rescuers, which may not have all the fields set up like normal
910 * workers, also reach here; let's not access anything before
911 * checking NOT_RUNNING.
912 */
913 if (worker->flags & WORKER_NOT_RUNNING)
914 return;
915
916 pool = worker->pool;
917
918 /* Return if preempted before wq_worker_running() was reached */
919 if (worker->sleeping)
920 return;
921
922 worker->sleeping = 1;
923 raw_spin_lock_irq(&pool->lock);
924
925 /*
926 * The counterpart of the following dec_and_test, implied mb,
927 * worklist not empty test sequence is in insert_work().
928 * Please read comment there.
929 *
930 * NOT_RUNNING is clear. This means that we're bound to and
931 * running on the local cpu w/ rq lock held and preemption
932 * disabled, which in turn means that no one else could be
933 * manipulating idle_list, so dereferencing idle_list without pool
934 * lock is safe.
935 */
936 if (atomic_dec_and_test(&pool->nr_running) &&
937 !list_empty(&pool->worklist)) {
938 next = first_idle_worker(pool);
939 if (next)
940 wake_up_process(next->task);
941 }
942 raw_spin_unlock_irq(&pool->lock);
943 }
944
945 /**
946 * wq_worker_last_func - retrieve worker's last work function
947 * @task: Task to retrieve last work function of.
948 *
949 * Determine the last function a worker executed. This is called from
950 * the scheduler to get a worker's last known identity.
951 *
952 * CONTEXT:
953 * raw_spin_lock_irq(rq->lock)
954 *
955 * This function is called during schedule() when a kworker is going
956 * to sleep. It's used by psi to identify aggregation workers during
957 * dequeuing, to allow periodic aggregation to shut off when that
958 * worker is the last task in the system or cgroup to go to sleep.
959 *
960 * As this function doesn't involve any workqueue-related locking, it
961 * only returns stable values when called from inside the scheduler's
962 * queuing and dequeuing paths, when @task, which must be a kworker,
963 * is guaranteed to not be processing any works.
964 *
965 * Return:
966 * The last work function %current executed as a worker, NULL if it
967 * hasn't executed any work yet.
968 */
969 work_func_t wq_worker_last_func(struct task_struct *task)
970 {
971 struct worker *worker = kthread_data(task);
972
973 return worker->last_func;
974 }
975
976 /**
977 * worker_set_flags - set worker flags and adjust nr_running accordingly
978 * @worker: self
979 * @flags: flags to set
980 *
981 * Set @flags in @worker->flags and adjust nr_running accordingly.
982 *
983 * CONTEXT:
984 * raw_spin_lock_irq(pool->lock)
985 */
986 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
987 {
988 struct worker_pool *pool = worker->pool;
989
990 WARN_ON_ONCE(worker->task != current);
991
992 /* If transitioning into NOT_RUNNING, adjust nr_running. */
993 if ((flags & WORKER_NOT_RUNNING) &&
994 !(worker->flags & WORKER_NOT_RUNNING)) {
995 atomic_dec(&pool->nr_running);
996 }
997
998 worker->flags |= flags;
999 }
1000
1001 /**
1002 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
1003 * @worker: self
1004 * @flags: flags to clear
1005 *
1006 * Clear @flags in @worker->flags and adjust nr_running accordingly.
1007 *
1008 * CONTEXT:
1009 * raw_spin_lock_irq(pool->lock)
1010 */
1011 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
1012 {
1013 struct worker_pool *pool = worker->pool;
1014 unsigned int oflags = worker->flags;
1015
1016 WARN_ON_ONCE(worker->task != current);
1017
1018 worker->flags &= ~flags;
1019
1020 /*
1021 * If transitioning out of NOT_RUNNING, increment nr_running. Note
1022 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
1023 * of multiple flags, not a single flag.
1024 */
1025 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1026 if (!(worker->flags & WORKER_NOT_RUNNING))
1027 atomic_inc(&pool->nr_running);
1028 }
1029
1030 /**
1031 * find_worker_executing_work - find worker which is executing a work
1032 * @pool: pool of interest
1033 * @work: work to find worker for
1034 *
1035 * Find a worker which is executing @work on @pool by searching
1036 * @pool->busy_hash which is keyed by the address of @work. For a worker
1037 * to match, its current execution should match the address of @work and
1038 * its work function. This is to avoid unwanted dependency between
1039 * unrelated work executions through a work item being recycled while still
1040 * being executed.
1041 *
1042 * This is a bit tricky. A work item may be freed once its execution
1043 * starts and nothing prevents the freed area from being recycled for
1044 * another work item. If the same work item address ends up being reused
1045 * before the original execution finishes, workqueue will identify the
1046 * recycled work item as currently executing and make it wait until the
1047 * current execution finishes, introducing an unwanted dependency.
1048 *
1049 * This function checks the work item address and work function to avoid
1050 * false positives. Note that this isn't complete as one may construct a
1051 * work function which can introduce dependency onto itself through a
1052 * recycled work item. Well, if somebody wants to shoot oneself in the
1053 * foot that badly, there's only so much we can do, and if such deadlock
1054 * actually occurs, it should be easy to locate the culprit work function.
1055 *
1056 * CONTEXT:
1057 * raw_spin_lock_irq(pool->lock).
1058 *
1059 * Return:
1060 * Pointer to worker which is executing @work if found, %NULL
1061 * otherwise.
1062 */
1063 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1064 struct work_struct *work)
1065 {
1066 struct worker *worker;
1067
1068 hash_for_each_possible(pool->busy_hash, worker, hentry,
1069 (unsigned long)work)
1070 if (worker->current_work == work &&
1071 worker->current_func == work->func)
1072 return worker;
1073
1074 return NULL;
1075 }
1076
1077 /**
1078 * move_linked_works - move linked works to a list
1079 * @work: start of series of works to be scheduled
1080 * @head: target list to append @work to
1081 * @nextp: out parameter for nested worklist walking
1082 *
1083 * Schedule linked works starting from @work to @head. Work series to
1084 * be scheduled starts at @work and includes any consecutive work with
1085 * WORK_STRUCT_LINKED set in its predecessor.
1086 *
1087 * If @nextp is not NULL, it's updated to point to the next work of
1088 * the last scheduled work. This allows move_linked_works() to be
1089 * nested inside outer list_for_each_entry_safe().
1090 *
1091 * CONTEXT:
1092 * raw_spin_lock_irq(pool->lock).
1093 */
1094 static void move_linked_works(struct work_struct *work, struct list_head *head,
1095 struct work_struct **nextp)
1096 {
1097 struct work_struct *n;
1098
1099 /*
1100 * A linked worklist will always end before the end of the list;
1101 * use NULL for the list head.
1102 */
1103 list_for_each_entry_safe_from(work, n, NULL, entry) {
1104 list_move_tail(&work->entry, head);
1105 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1106 break;
1107 }
1108
1109 /*
1110 * If we're already inside safe list traversal and have moved
1111 * multiple works to the scheduled queue, the next position
1112 * needs to be updated.
1113 */
1114 if (nextp)
1115 *nextp = n;
1116 }
1117
1118 /**
1119 * get_pwq - get an extra reference on the specified pool_workqueue
1120 * @pwq: pool_workqueue to get
1121 *
1122 * Obtain an extra reference on @pwq. The caller should guarantee that
1123 * @pwq has positive refcnt and be holding the matching pool->lock.
1124 */
1125 static void get_pwq(struct pool_workqueue *pwq)
1126 {
1127 lockdep_assert_held(&pwq->pool->lock);
1128 WARN_ON_ONCE(pwq->refcnt <= 0);
1129 pwq->refcnt++;
1130 }
1131
1132 /**
1133 * put_pwq - put a pool_workqueue reference
1134 * @pwq: pool_workqueue to put
1135 *
1136 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1137 * destruction. The caller should be holding the matching pool->lock.
1138 */
1139 static void put_pwq(struct pool_workqueue *pwq)
1140 {
1141 lockdep_assert_held(&pwq->pool->lock);
1142 if (likely(--pwq->refcnt))
1143 return;
1144 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1145 return;
1146 /*
1147 * @pwq can't be released under pool->lock, bounce to
1148 * pwq_unbound_release_workfn(). This never recurses on the same
1149 * pool->lock as this path is taken only for unbound workqueues and
1150 * the release work item is scheduled on a per-cpu workqueue. To
1151 * avoid lockdep warning, unbound pool->locks are given lockdep
1152 * subclass of 1 in get_unbound_pool().
1153 */
1154 schedule_work(&pwq->unbound_release_work);
1155 }
1156
1157 /**
1158 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1159 * @pwq: pool_workqueue to put (can be %NULL)
1160 *
1161 * put_pwq() with locking. This function also allows %NULL @pwq.
1162 */
1163 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1164 {
1165 if (pwq) {
1166 /*
1167 * As both pwqs and pools are RCU protected, the
1168 * following lock operations are safe.
1169 */
1170 raw_spin_lock_irq(&pwq->pool->lock);
1171 put_pwq(pwq);
1172 raw_spin_unlock_irq(&pwq->pool->lock);
1173 }
1174 }
1175
1176 static void pwq_activate_inactive_work(struct work_struct *work)
1177 {
1178 struct pool_workqueue *pwq = get_work_pwq(work);
1179
1180 trace_workqueue_activate_work(work);
1181 if (list_empty(&pwq->pool->worklist))
1182 pwq->pool->watchdog_ts = jiffies;
1183 move_linked_works(work, &pwq->pool->worklist, NULL);
1184 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1185 pwq->nr_active++;
1186 }
1187
1188 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1189 {
1190 struct work_struct *work = list_first_entry(&pwq->inactive_works,
1191 struct work_struct, entry);
1192
1193 pwq_activate_inactive_work(work);
1194 }
1195
1196 /**
1197 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1198 * @pwq: pwq of interest
1199 * @work_data: work_data of work which left the queue
1200 *
1201 * A work item has either completed or been removed from the pending
1202 * queue; decrement nr_in_flight of its pwq and handle workqueue flushing.
1203 *
1204 * CONTEXT:
1205 * raw_spin_lock_irq(pool->lock).
1206 */
1207 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1208 {
1209 int color = get_work_color(work_data);
1210
1211 if (!(work_data & WORK_STRUCT_INACTIVE)) {
1212 pwq->nr_active--;
1213 if (!list_empty(&pwq->inactive_works)) {
1214 /* one down, submit an inactive one */
1215 if (pwq->nr_active < pwq->max_active)
1216 pwq_activate_first_inactive(pwq);
1217 }
1218 }
1219
1220 pwq->nr_in_flight[color]--;
1221
1222 /* is flush in progress and are we at the flushing tip? */
1223 if (likely(pwq->flush_color != color))
1224 goto out_put;
1225
1226 /* are there still in-flight works? */
1227 if (pwq->nr_in_flight[color])
1228 goto out_put;
1229
1230 /* this pwq is done, clear flush_color */
1231 pwq->flush_color = -1;
1232
1233 /*
1234 * If this was the last pwq, wake up the first flusher. It
1235 * will handle the rest.
1236 */
1237 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1238 complete(&pwq->wq->first_flusher->done);
1239 out_put:
1240 put_pwq(pwq);
1241 }
1242
1243 /**
1244 * try_to_grab_pending - steal work item from worklist and disable irq
1245 * @work: work item to steal
1246 * @is_dwork: @work is a delayed_work
1247 * @flags: place to store irq state
1248 *
1249 * Try to grab PENDING bit of @work. This function can handle @work in any
1250 * stable state - idle, on timer or on worklist.
1251 *
1252 * Return:
1253 *
1254 * ======== ================================================================
1255 * 1 if @work was pending and we successfully stole PENDING
1256 * 0 if @work was idle and we claimed PENDING
1257 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1258 * -ENOENT if someone else is canceling @work, this state may persist
1259 * for arbitrarily long
1260 * ======== ================================================================
1261 *
1262 * Note:
1263 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1264 * interrupted while holding PENDING and @work off queue, irq must be
1265 * disabled on entry. This, combined with delayed_work->timer being
1266 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
1267 *
1268 * On successful return, >= 0, irq is disabled and the caller is
1269 * responsible for releasing it using local_irq_restore(*@flags).
1270 *
1271 * This function is safe to call from any context including IRQ handler.
1272 */
1273 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1274 unsigned long *flags)
1275 {
1276 struct worker_pool *pool;
1277 struct pool_workqueue *pwq;
1278
1279 local_irq_save(*flags);
1280
1281 /* try to steal the timer if it exists */
1282 if (is_dwork) {
1283 struct delayed_work *dwork = to_delayed_work(work);
1284
1285 /*
1286 * dwork->timer is irqsafe. If del_timer() fails, it's
1287 * guaranteed that the timer is not queued anywhere and not
1288 * running on the local CPU.
1289 */
1290 if (likely(del_timer(&dwork->timer)))
1291 return 1;
1292 }
1293
1294 /* try to claim PENDING the normal way */
1295 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1296 return 0;
1297
1298 rcu_read_lock();
1299 /*
1300 * The queueing is in progress, or it is already queued. Try to
1301 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1302 */
1303 pool = get_work_pool(work);
1304 if (!pool)
1305 goto fail;
1306
1307 raw_spin_lock(&pool->lock);
1308 /*
1309 * work->data is guaranteed to point to pwq only while the work
1310 * item is queued on pwq->wq, and both updating work->data to point
1311 * to pwq on queueing and to pool on dequeueing are done under
1312 * pwq->pool->lock. This in turn guarantees that, if work->data
1313 * points to pwq which is associated with a locked pool, the work
1314 * item is currently queued on that pool.
1315 */
1316 pwq = get_work_pwq(work);
1317 if (pwq && pwq->pool == pool) {
1318 debug_work_deactivate(work);
1319
1320 /*
1321 * A cancelable inactive work item must be in the
1322 * pwq->inactive_works since a queued barrier can't be
1323 * canceled (see the comments in insert_wq_barrier()).
1324 *
1325 * An inactive work item cannot be grabbed directly because
1326 * it might have linked barrier work items which, if left
1327 * on the inactive_works list, will confuse pwq->nr_active
1328 * management later on and cause stall. Make sure the work
1329 * item is activated before grabbing.
1330 */
1331 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1332 pwq_activate_inactive_work(work);
1333
1334 list_del_init(&work->entry);
1335 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
1336
1337 /* work->data points to pwq iff queued, point to pool */
1338 set_work_pool_and_keep_pending(work, pool->id);
1339
1340 raw_spin_unlock(&pool->lock);
1341 rcu_read_unlock();
1342 return 1;
1343 }
1344 raw_spin_unlock(&pool->lock);
1345 fail:
1346 rcu_read_unlock();
1347 local_irq_restore(*flags);
1348 if (work_is_canceling(work))
1349 return -ENOENT;
1350 cpu_relax();
1351 return -EAGAIN;
1352 }
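/*
 * Sketch of how callers typically consume the return values documented
 * above (this mirrors the retry loop used by the cancel paths later in
 * this file; illustrative, not a public interface):
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *		// -EAGAIN is transient; -ENOENT means someone is canceling
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		// we own PENDING with irqs disabled, state saved in flags
 *		local_irq_restore(flags);
 *	}
 */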
1353
1354 /**
1355 * insert_work - insert a work into a pool
1356 * @pwq: pwq @work belongs to
1357 * @work: work to insert
1358 * @head: insertion point
1359 * @extra_flags: extra WORK_STRUCT_* flags to set
1360 *
1361 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1362 * work_struct flags.
1363 *
1364 * CONTEXT:
1365 * raw_spin_lock_irq(pool->lock).
1366 */
1367 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1368 struct list_head *head, unsigned int extra_flags)
1369 {
1370 struct worker_pool *pool = pwq->pool;
1371
1372 /* record the work call stack in order to print it in KASAN reports */
1373 kasan_record_aux_stack_noalloc(work);
1374
1375 /* we own @work, set data and link */
1376 set_work_pwq(work, pwq, extra_flags);
1377 list_add_tail(&work->entry, head);
1378 get_pwq(pwq);
1379
1380 /*
1381 * Ensure either wq_worker_sleeping() sees the above
1382 * list_add_tail() or we see zero nr_running to avoid workers lying
1383 * around lazily while there are works to be processed.
1384 */
1385 smp_mb();
1386
1387 if (__need_more_worker(pool))
1388 wake_up_worker(pool);
1389 }
1390
1391 /*
1392 * Test whether @work is being queued from another work executing on the
1393 * same workqueue.
1394 */
1395 static bool is_chained_work(struct workqueue_struct *wq)
1396 {
1397 struct worker *worker;
1398
1399 worker = current_wq_worker();
1400 /*
1401 * Return %true iff I'm a worker executing a work item on @wq. If
1402 * I'm @worker, it's safe to dereference it without locking.
1403 */
1404 return worker && worker->current_pwq->wq == wq;
1405 }
1406
1407 /*
1408 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1409 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1410 * avoid perturbing sensitive tasks.
1411 */
1412 static int wq_select_unbound_cpu(int cpu)
1413 {
1414 static bool printed_dbg_warning;
1415 int new_cpu;
1416
1417 if (likely(!wq_debug_force_rr_cpu)) {
1418 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1419 return cpu;
1420 } else if (!printed_dbg_warning) {
1421 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1422 printed_dbg_warning = true;
1423 }
1424
1425 if (cpumask_empty(wq_unbound_cpumask))
1426 return cpu;
1427
1428 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1429 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1430 if (unlikely(new_cpu >= nr_cpu_ids)) {
1431 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1432 if (unlikely(new_cpu >= nr_cpu_ids))
1433 return cpu;
1434 }
1435 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1436
1437 return new_cpu;
1438 }
1439
1440 static void __queue_work(int cpu, struct workqueue_struct *wq,
1441 struct work_struct *work)
1442 {
1443 struct pool_workqueue *pwq;
1444 struct worker_pool *last_pool;
1445 struct list_head *worklist;
1446 unsigned int work_flags;
1447 unsigned int req_cpu = cpu;
1448
1449 /*
1450 * While a work item is PENDING && off queue, a task trying to
1451 * steal the PENDING will busy-loop waiting for it to either get
1452 * queued or lose PENDING. Grabbing PENDING and queueing should
1453 * happen with IRQ disabled.
1454 */
1455 lockdep_assert_irqs_disabled();
1456
1457
1458 /* if draining, only works from the same workqueue are allowed */
1459 if (unlikely(wq->flags & __WQ_DRAINING) &&
1460 WARN_ON_ONCE(!is_chained_work(wq)))
1461 return;
1462 rcu_read_lock();
1463 retry:
1464 /* pwq which will be used unless @work is executing elsewhere */
1465 if (wq->flags & WQ_UNBOUND) {
1466 if (req_cpu == WORK_CPU_UNBOUND)
1467 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1468 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1469 } else {
1470 if (req_cpu == WORK_CPU_UNBOUND)
1471 cpu = raw_smp_processor_id();
1472 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1473 }
1474
1475 /*
1476 * If @work was previously on a different pool, it might still be
1477 * running there, in which case the work needs to be queued on that
1478 * pool to guarantee non-reentrancy.
1479 */
1480 last_pool = get_work_pool(work);
1481 if (last_pool && last_pool != pwq->pool) {
1482 struct worker *worker;
1483
1484 raw_spin_lock(&last_pool->lock);
1485
1486 worker = find_worker_executing_work(last_pool, work);
1487
1488 if (worker && worker->current_pwq->wq == wq) {
1489 pwq = worker->current_pwq;
1490 } else {
1491 /* meh... not running there, queue here */
1492 raw_spin_unlock(&last_pool->lock);
1493 raw_spin_lock(&pwq->pool->lock);
1494 }
1495 } else {
1496 raw_spin_lock(&pwq->pool->lock);
1497 }
1498
1499 /*
1500 * pwq is determined and locked. For unbound pools, we could have
1501 * raced with pwq release and it could already be dead. If its
1502 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1503 * without another pwq replacing it in the numa_pwq_tbl or while
1504 * work items are executing on it, so the retrying is guaranteed to
1505 * make forward-progress.
1506 */
1507 if (unlikely(!pwq->refcnt)) {
1508 if (wq->flags & WQ_UNBOUND) {
1509 raw_spin_unlock(&pwq->pool->lock);
1510 cpu_relax();
1511 goto retry;
1512 }
1513 /* oops */
1514 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1515 wq->name, cpu);
1516 }
1517
1518 /* pwq determined, queue */
1519 trace_workqueue_queue_work(req_cpu, pwq, work);
1520
1521 if (WARN_ON(!list_empty(&work->entry)))
1522 goto out;
1523
1524 pwq->nr_in_flight[pwq->work_color]++;
1525 work_flags = work_color_to_flags(pwq->work_color);
1526
1527 if (likely(pwq->nr_active < pwq->max_active)) {
1528 trace_workqueue_activate_work(work);
1529 pwq->nr_active++;
1530 worklist = &pwq->pool->worklist;
1531 if (list_empty(worklist))
1532 pwq->pool->watchdog_ts = jiffies;
1533 } else {
1534 work_flags |= WORK_STRUCT_INACTIVE;
1535 worklist = &pwq->inactive_works;
1536 }
1537
1538 debug_work_activate(work);
1539 insert_work(pwq, work, worklist, work_flags);
1540
1541 out:
1542 raw_spin_unlock(&pwq->pool->lock);
1543 rcu_read_unlock();
1544 }
1545
1546 /**
1547 * queue_work_on - queue work on specific cpu
1548 * @cpu: CPU number to execute work on
1549 * @wq: workqueue to use
1550 * @work: work to queue
1551 *
1552 * We queue the work to a specific CPU, the caller must ensure it
1553 * can't go away.
1554 *
1555 * Return: %false if @work was already on a queue, %true otherwise.
1556 */
1557 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1558 struct work_struct *work)
1559 {
1560 bool ret = false;
1561 unsigned long flags;
1562
1563 local_irq_save(flags);
1564
1565 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1566 __queue_work(cpu, wq, work);
1567 ret = true;
1568 }
1569
1570 local_irq_restore(flags);
1571 return ret;
1572 }
1573 EXPORT_SYMBOL(queue_work_on);
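/*
 * Usage sketch (illustrative; my_wq and my_work are made-up names): queue
 * on a specific CPU that the caller guarantees cannot go away, e.g. one
 * pinned by the caller or protected by a hotplug lock:
 *
 *	if (!queue_work_on(cpu, my_wq, &my_work))
 *		pr_debug("my_work was already pending\n");
 */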
1574
1575 /**
1576 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1577 * @node: NUMA node ID that we want to select a CPU from
1578 *
1579 * This function will attempt to find a "random" cpu available on a given
1580 * node. If there are no CPUs available on the given node it will return
1581 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1582 * available CPU if we need to schedule this work.
1583 */
1584 static int workqueue_select_cpu_near(int node)
1585 {
1586 int cpu;
1587
1588 /* No point in doing this if NUMA isn't enabled for workqueues */
1589 if (!wq_numa_enabled)
1590 return WORK_CPU_UNBOUND;
1591
1592 /* Delay binding to CPU if node is not valid or online */
1593 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1594 return WORK_CPU_UNBOUND;
1595
1596 /* Use local node/cpu if we are already there */
1597 cpu = raw_smp_processor_id();
1598 if (node == cpu_to_node(cpu))
1599 return cpu;
1600
1601 	/* Use "random", otherwise known as the first online CPU of the node */
1602 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1603
1604 /* If CPU is valid return that, otherwise just defer */
1605 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1606 }
1607
1608 /**
1609 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1610 * @node: NUMA node that we are targeting the work for
1611 * @wq: workqueue to use
1612 * @work: work to queue
1613 *
1614 * We queue the work to a "random" CPU within a given NUMA node. The basic
1615 * idea here is to provide a way to somehow associate work with a given
1616 * NUMA node.
1617 *
1618 * This function will only make a best effort attempt at getting this onto
1619 * the right NUMA node. If no node is requested or the requested node is
1620 * offline then we just fall back to standard queue_work behavior.
1621 *
1622 * Currently the "random" CPU ends up being the first available CPU in the
1623 * intersection of cpu_online_mask and the cpumask of the node, unless we
1624 * are running on the node. In that case we just use the current CPU.
1625 *
1626 * Return: %false if @work was already on a queue, %true otherwise.
1627 */
1628 bool queue_work_node(int node, struct workqueue_struct *wq,
1629 struct work_struct *work)
1630 {
1631 unsigned long flags;
1632 bool ret = false;
1633
1634 /*
1635 * This current implementation is specific to unbound workqueues.
1636 * Specifically we only return the first available CPU for a given
1637 * node instead of cycling through individual CPUs within the node.
1638 *
1639 * If this is used with a per-cpu workqueue then the logic in
1640 * workqueue_select_cpu_near would need to be updated to allow for
1641 * some round robin type logic.
1642 */
1643 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1644
1645 local_irq_save(flags);
1646
1647 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1648 int cpu = workqueue_select_cpu_near(node);
1649
1650 __queue_work(cpu, wq, work);
1651 ret = true;
1652 }
1653
1654 local_irq_restore(flags);
1655 return ret;
1656 }
1657 EXPORT_SYMBOL_GPL(queue_work_node);
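/*
 * Illustrative usage sketch (not in the original file; the my_* names are
 * hypothetical): keep processing near the NUMA node the data lives on.
 * queue_work_node() expects an unbound workqueue.
 *
 *	static struct workqueue_struct *my_wq;
 *	static struct work_struct my_work;
 *
 *	my_wq = alloc_workqueue("my_numa_wq", WQ_UNBOUND, 0);
 *	INIT_WORK(&my_work, my_work_fn);
 *	queue_work_node(page_to_nid(my_page), my_wq, &my_work);
 */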
1658
1659 void delayed_work_timer_fn(struct timer_list *t)
1660 {
1661 struct delayed_work *dwork = from_timer(dwork, t, timer);
1662
1663 /* should have been called from irqsafe timer with irq already off */
1664 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1665 }
1666 EXPORT_SYMBOL(delayed_work_timer_fn);
1667
1668 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1669 struct delayed_work *dwork, unsigned long delay)
1670 {
1671 struct timer_list *timer = &dwork->timer;
1672 struct work_struct *work = &dwork->work;
1673
1674 WARN_ON_ONCE(!wq);
1675 WARN_ON_FUNCTION_MISMATCH(timer->function, delayed_work_timer_fn);
1676 WARN_ON_ONCE(timer_pending(timer));
1677 WARN_ON_ONCE(!list_empty(&work->entry));
1678
1679 /*
1680 * If @delay is 0, queue @dwork->work immediately. This is for
1681 * both optimization and correctness. The earliest @timer can
1682 	 * expire is on the closest next tick and delayed_work users depend
1683 	 * on there being no such delay when @delay is 0.
1684 */
1685 if (!delay) {
1686 __queue_work(cpu, wq, &dwork->work);
1687 return;
1688 }
1689
1690 dwork->wq = wq;
1691 dwork->cpu = cpu;
1692 timer->expires = jiffies + delay;
1693
1694 if (unlikely(cpu != WORK_CPU_UNBOUND))
1695 add_timer_on(timer, cpu);
1696 else
1697 add_timer(timer);
1698 }
1699
1700 /**
1701 * queue_delayed_work_on - queue work on specific CPU after delay
1702 * @cpu: CPU number to execute work on
1703 * @wq: workqueue to use
1704 * @dwork: work to queue
1705 * @delay: number of jiffies to wait before queueing
1706 *
1707 * Return: %false if @work was already on a queue, %true otherwise. If
1708 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1709 * execution.
1710 */
1711 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1712 struct delayed_work *dwork, unsigned long delay)
1713 {
1714 struct work_struct *work = &dwork->work;
1715 bool ret = false;
1716 unsigned long flags;
1717
1718 /* read the comment in __queue_work() */
1719 local_irq_save(flags);
1720
1721 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1722 __queue_delayed_work(cpu, wq, dwork, delay);
1723 ret = true;
1724 }
1725
1726 local_irq_restore(flags);
1727 return ret;
1728 }
1729 EXPORT_SYMBOL(queue_delayed_work_on);
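/*
 * Illustrative usage sketch (not in the original file; the my_poll names
 * are hypothetical): poll hardware every 500ms, re-arming the delayed work
 * from its own callback. queue_delayed_work() is the WORK_CPU_UNBOUND
 * wrapper around queue_delayed_work_on().
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		// ... read hardware state ...
 *		queue_delayed_work(system_wq, dwork, msecs_to_jiffies(500));
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);
 *
 *	// Kick off the first poll.
 *	queue_delayed_work(system_wq, &my_poll_work, msecs_to_jiffies(500));
 */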
1730
1731 /**
1732 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1733 * @cpu: CPU number to execute work on
1734 * @wq: workqueue to use
1735 * @dwork: work to queue
1736 * @delay: number of jiffies to wait before queueing
1737 *
1738 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1739 * modify @dwork's timer so that it expires after @delay. If @delay is
1740 * zero, @work is guaranteed to be scheduled immediately regardless of its
1741 * current state.
1742 *
1743 * Return: %false if @dwork was idle and queued, %true if @dwork was
1744 * pending and its timer was modified.
1745 *
1746 * This function is safe to call from any context including IRQ handler.
1747 * See try_to_grab_pending() for details.
1748 */
1749 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1750 struct delayed_work *dwork, unsigned long delay)
1751 {
1752 unsigned long flags;
1753 int ret;
1754
1755 do {
1756 ret = try_to_grab_pending(&dwork->work, true, &flags);
1757 } while (unlikely(ret == -EAGAIN));
1758
1759 if (likely(ret >= 0)) {
1760 __queue_delayed_work(cpu, wq, dwork, delay);
1761 local_irq_restore(flags);
1762 }
1763
1764 /* -ENOENT from try_to_grab_pending() becomes %true */
1765 return ret;
1766 }
1767 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
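/*
 * Illustrative usage sketch (not in the original file; my_flush_work is
 * hypothetical): a debounce timer. Every event pushes execution back by
 * 100ms regardless of whether the work is already pending.
 *
 *	mod_delayed_work(system_wq, &my_flush_work, msecs_to_jiffies(100));
 *
 * which is shorthand for
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_flush_work,
 *			    msecs_to_jiffies(100));
 */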
1768
1769 static void rcu_work_rcufn(struct rcu_head *rcu)
1770 {
1771 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1772
1773 /* read the comment in __queue_work() */
1774 local_irq_disable();
1775 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1776 local_irq_enable();
1777 }
1778
1779 /**
1780 * queue_rcu_work - queue work after a RCU grace period
1781 * @wq: workqueue to use
1782 * @rwork: work to queue
1783 *
1784 * Return: %false if @rwork was already pending, %true otherwise. Note
1785 * that a full RCU grace period is guaranteed only after a %true return.
1786 * While @rwork is guaranteed to be executed after a %false return, the
1787 * execution may happen before a full RCU grace period has passed.
1788 */
1789 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1790 {
1791 struct work_struct *work = &rwork->work;
1792
1793 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1794 rwork->wq = wq;
1795 call_rcu(&rwork->rcu, rcu_work_rcufn);
1796 return true;
1797 }
1798
1799 return false;
1800 }
1801 EXPORT_SYMBOL(queue_rcu_work);
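/*
 * Illustrative usage sketch (not in the original file; struct my_obj and
 * its free_rwork member are hypothetical): free an RCU-protected object
 * from process context, but only after a grace period has elapsed.
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_rwork);
 *
 *		kvfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_rwork, my_free_fn);
 *	queue_rcu_work(system_unbound_wq, &obj->free_rwork);
 */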
1802
1803 /**
1804 * worker_enter_idle - enter idle state
1805 * @worker: worker which is entering idle state
1806 *
1807 * @worker is entering idle state. Update stats and idle timer if
1808 * necessary.
1809 *
1810 * LOCKING:
1811 * raw_spin_lock_irq(pool->lock).
1812 */
1813 static void worker_enter_idle(struct worker *worker)
1814 {
1815 struct worker_pool *pool = worker->pool;
1816
1817 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1818 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1819 (worker->hentry.next || worker->hentry.pprev)))
1820 return;
1821
1822 /* can't use worker_set_flags(), also called from create_worker() */
1823 worker->flags |= WORKER_IDLE;
1824 pool->nr_idle++;
1825 worker->last_active = jiffies;
1826
1827 /* idle_list is LIFO */
1828 list_add(&worker->entry, &pool->idle_list);
1829
1830 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1831 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1832
1833 /*
1834 * Sanity check nr_running. Because unbind_workers() releases
1835 * pool->lock between setting %WORKER_UNBOUND and zapping
1836 * nr_running, the warning may trigger spuriously. Check iff
1837 * unbind is not in progress.
1838 */
1839 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1840 pool->nr_workers == pool->nr_idle &&
1841 atomic_read(&pool->nr_running));
1842 }
1843
1844 /**
1845 * worker_leave_idle - leave idle state
1846 * @worker: worker which is leaving idle state
1847 *
1848 * @worker is leaving idle state. Update stats.
1849 *
1850 * LOCKING:
1851 * raw_spin_lock_irq(pool->lock).
1852 */
1853 static void worker_leave_idle(struct worker *worker)
1854 {
1855 struct worker_pool *pool = worker->pool;
1856
1857 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1858 return;
1859 worker_clr_flags(worker, WORKER_IDLE);
1860 pool->nr_idle--;
1861 list_del_init(&worker->entry);
1862 }
1863
1864 static struct worker *alloc_worker(int node)
1865 {
1866 struct worker *worker;
1867
1868 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1869 if (worker) {
1870 INIT_LIST_HEAD(&worker->entry);
1871 INIT_LIST_HEAD(&worker->scheduled);
1872 INIT_LIST_HEAD(&worker->node);
1873 /* on creation a worker is in !idle && prep state */
1874 worker->flags = WORKER_PREP;
1875 }
1876 return worker;
1877 }
1878
1879 /**
1880 * worker_attach_to_pool() - attach a worker to a pool
1881 * @worker: worker to be attached
1882 * @pool: the target pool
1883 *
1884 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1885 * cpu-binding of @worker are kept coordinated with the pool across
1886 * cpu-[un]hotplugs.
1887 */
1888 static void worker_attach_to_pool(struct worker *worker,
1889 struct worker_pool *pool)
1890 {
1891 mutex_lock(&wq_pool_attach_mutex);
1892
1893 /*
1894 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1895 * stable across this function. See the comments above the flag
1896 * definition for details.
1897 */
1898 if (pool->flags & POOL_DISASSOCIATED)
1899 worker->flags |= WORKER_UNBOUND;
1900 else
1901 kthread_set_per_cpu(worker->task, pool->cpu);
1902
1903 if (worker->rescue_wq)
1904 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1905
1906 list_add_tail(&worker->node, &pool->workers);
1907 worker->pool = pool;
1908
1909 mutex_unlock(&wq_pool_attach_mutex);
1910 }
1911
1912 /**
1913 * worker_detach_from_pool() - detach a worker from its pool
1914 * @worker: worker which is attached to its pool
1915 *
1916  * Undo the attaching which had been done in worker_attach_to_pool(). The
1917  * caller worker shouldn't access the pool after detaching unless it has
1918  * another reference to the pool.
1919 */
1920 static void worker_detach_from_pool(struct worker *worker)
1921 {
1922 struct worker_pool *pool = worker->pool;
1923 struct completion *detach_completion = NULL;
1924
1925 mutex_lock(&wq_pool_attach_mutex);
1926
1927 kthread_set_per_cpu(worker->task, -1);
1928 list_del(&worker->node);
1929 worker->pool = NULL;
1930
1931 if (list_empty(&pool->workers))
1932 detach_completion = pool->detach_completion;
1933 mutex_unlock(&wq_pool_attach_mutex);
1934
1935 /* clear leftover flags without pool->lock after it is detached */
1936 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1937
1938 if (detach_completion)
1939 complete(detach_completion);
1940 }
1941
1942 /**
1943 * create_worker - create a new workqueue worker
1944 * @pool: pool the new worker will belong to
1945 *
1946 * Create and start a new worker which is attached to @pool.
1947 *
1948 * CONTEXT:
1949 * Might sleep. Does GFP_KERNEL allocations.
1950 *
1951 * Return:
1952 * Pointer to the newly created worker.
1953 */
1954 static struct worker *create_worker(struct worker_pool *pool)
1955 {
1956 struct worker *worker;
1957 int id;
1958 char id_buf[16];
1959
1960 /* ID is needed to determine kthread name */
1961 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
1962 if (id < 0)
1963 return NULL;
1964
1965 worker = alloc_worker(pool->node);
1966 if (!worker)
1967 goto fail;
1968
1969 worker->id = id;
1970
1971 if (pool->cpu >= 0)
1972 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1973 pool->attrs->nice < 0 ? "H" : "");
1974 else
1975 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1976
1977 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1978 "kworker/%s", id_buf);
1979 if (IS_ERR(worker->task))
1980 goto fail;
1981
1982 set_user_nice(worker->task, pool->attrs->nice);
1983 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1984
1985 /* successful, attach the worker to the pool */
1986 worker_attach_to_pool(worker, pool);
1987
1988 /* start the newly created worker */
1989 raw_spin_lock_irq(&pool->lock);
1990 worker->pool->nr_workers++;
1991 worker_enter_idle(worker);
1992 wake_up_process(worker->task);
1993 raw_spin_unlock_irq(&pool->lock);
1994
1995 return worker;
1996
1997 fail:
1998 ida_free(&pool->worker_ida, id);
1999 kfree(worker);
2000 return NULL;
2001 }
2002
2003 /**
2004 * destroy_worker - destroy a workqueue worker
2005 * @worker: worker to be destroyed
2006 *
2007 * Destroy @worker and adjust @pool stats accordingly. The worker should
2008 * be idle.
2009 *
2010 * CONTEXT:
2011 * raw_spin_lock_irq(pool->lock).
2012 */
2013 static void destroy_worker(struct worker *worker)
2014 {
2015 struct worker_pool *pool = worker->pool;
2016
2017 lockdep_assert_held(&pool->lock);
2018
2019 /* sanity check frenzy */
2020 if (WARN_ON(worker->current_work) ||
2021 WARN_ON(!list_empty(&worker->scheduled)) ||
2022 WARN_ON(!(worker->flags & WORKER_IDLE)))
2023 return;
2024
2025 pool->nr_workers--;
2026 pool->nr_idle--;
2027
2028 list_del_init(&worker->entry);
2029 worker->flags |= WORKER_DIE;
2030 wake_up_process(worker->task);
2031 }
2032
2033 static void idle_worker_timeout(struct timer_list *t)
2034 {
2035 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2036
2037 raw_spin_lock_irq(&pool->lock);
2038
2039 while (too_many_workers(pool)) {
2040 struct worker *worker;
2041 unsigned long expires;
2042
2043 /* idle_list is kept in LIFO order, check the last one */
2044 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2045 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2046
2047 if (time_before(jiffies, expires)) {
2048 mod_timer(&pool->idle_timer, expires);
2049 break;
2050 }
2051
2052 destroy_worker(worker);
2053 }
2054
2055 raw_spin_unlock_irq(&pool->lock);
2056 }
2057
2058 static void send_mayday(struct work_struct *work)
2059 {
2060 struct pool_workqueue *pwq = get_work_pwq(work);
2061 struct workqueue_struct *wq = pwq->wq;
2062
2063 lockdep_assert_held(&wq_mayday_lock);
2064
2065 if (!wq->rescuer)
2066 return;
2067
2068 /* mayday mayday mayday */
2069 if (list_empty(&pwq->mayday_node)) {
2070 /*
2071 * If @pwq is for an unbound wq, its base ref may be put at
2072 * any time due to an attribute change. Pin @pwq until the
2073 * rescuer is done with it.
2074 */
2075 get_pwq(pwq);
2076 list_add_tail(&pwq->mayday_node, &wq->maydays);
2077 wake_up_process(wq->rescuer->task);
2078 }
2079 }
2080
2081 static void pool_mayday_timeout(struct timer_list *t)
2082 {
2083 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2084 struct work_struct *work;
2085
2086 raw_spin_lock_irq(&pool->lock);
2087 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2088
2089 if (need_to_create_worker(pool)) {
2090 /*
2091 * We've been trying to create a new worker but
2092 * haven't been successful. We might be hitting an
2093 * allocation deadlock. Send distress signals to
2094 * rescuers.
2095 */
2096 list_for_each_entry(work, &pool->worklist, entry)
2097 send_mayday(work);
2098 }
2099
2100 raw_spin_unlock(&wq_mayday_lock);
2101 raw_spin_unlock_irq(&pool->lock);
2102
2103 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2104 }
2105
2106 /**
2107 * maybe_create_worker - create a new worker if necessary
2108 * @pool: pool to create a new worker for
2109 *
2110 * Create a new worker for @pool if necessary. @pool is guaranteed to
2111 * have at least one idle worker on return from this function. If
2112 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2113 * sent to all rescuers with works scheduled on @pool to resolve
2114 * possible allocation deadlock.
2115 *
2116 * On return, need_to_create_worker() is guaranteed to be %false and
2117 * may_start_working() %true.
2118 *
2119 * LOCKING:
2120 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2121 * multiple times. Does GFP_KERNEL allocations. Called only from
2122 * manager.
2123 */
2124 static void maybe_create_worker(struct worker_pool *pool)
2125 __releases(&pool->lock)
2126 __acquires(&pool->lock)
2127 {
2128 restart:
2129 raw_spin_unlock_irq(&pool->lock);
2130
2131 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2132 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2133
2134 while (true) {
2135 if (create_worker(pool) || !need_to_create_worker(pool))
2136 break;
2137
2138 schedule_timeout_interruptible(CREATE_COOLDOWN);
2139
2140 if (!need_to_create_worker(pool))
2141 break;
2142 }
2143
2144 del_timer_sync(&pool->mayday_timer);
2145 raw_spin_lock_irq(&pool->lock);
2146 /*
2147 * This is necessary even after a new worker was just successfully
2148 * created as @pool->lock was dropped and the new worker might have
2149 * already become busy.
2150 */
2151 if (need_to_create_worker(pool))
2152 goto restart;
2153 }
2154
2155 /**
2156 * manage_workers - manage worker pool
2157 * @worker: self
2158 *
2159 * Assume the manager role and manage the worker pool @worker belongs
2160 * to. At any given time, there can be only zero or one manager per
2161 * pool. The exclusion is handled automatically by this function.
2162 *
2163 * The caller can safely start processing works on false return. On
2164 * true return, it's guaranteed that need_to_create_worker() is false
2165 * and may_start_working() is true.
2166 *
2167 * CONTEXT:
2168 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2169 * multiple times. Does GFP_KERNEL allocations.
2170 *
2171 * Return:
2172 * %false if the pool doesn't need management and the caller can safely
2173 * start processing works, %true if management function was performed and
2174 * the conditions that the caller verified before calling the function may
2175 * no longer be true.
2176 */
2177 static bool manage_workers(struct worker *worker)
2178 {
2179 struct worker_pool *pool = worker->pool;
2180
2181 if (pool->flags & POOL_MANAGER_ACTIVE)
2182 return false;
2183
2184 pool->flags |= POOL_MANAGER_ACTIVE;
2185 pool->manager = worker;
2186
2187 maybe_create_worker(pool);
2188
2189 pool->manager = NULL;
2190 pool->flags &= ~POOL_MANAGER_ACTIVE;
2191 rcuwait_wake_up(&manager_wait);
2192 return true;
2193 }
2194
2195 /**
2196 * process_one_work - process single work
2197 * @worker: self
2198 * @work: work to process
2199 *
2200  * Process @work. This function contains all the logic necessary to
2201 * process a single work including synchronization against and
2202 * interaction with other workers on the same cpu, queueing and
2203 * flushing. As long as context requirement is met, any worker can
2204 * call this function to process a work.
2205 *
2206 * CONTEXT:
2207 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2208 */
2209 static void process_one_work(struct worker *worker, struct work_struct *work)
2210 __releases(&pool->lock)
2211 __acquires(&pool->lock)
2212 {
2213 struct pool_workqueue *pwq = get_work_pwq(work);
2214 struct worker_pool *pool = worker->pool;
2215 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2216 unsigned long work_data;
2217 struct worker *collision;
2218 #ifdef CONFIG_LOCKDEP
2219 /*
2220 * It is permissible to free the struct work_struct from
2221 * inside the function that is called from it, this we need to
2222 * take into account for lockdep too. To avoid bogus "held
2223 * lock freed" warnings as well as problems when looking into
2224 * work->lockdep_map, make a copy and use that here.
2225 */
2226 struct lockdep_map lockdep_map;
2227
2228 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2229 #endif
2230 /* ensure we're on the correct CPU */
2231 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2232 raw_smp_processor_id() != pool->cpu);
2233
2234 /*
2235 * A single work shouldn't be executed concurrently by
2236 * multiple workers on a single cpu. Check whether anyone is
2237 * already processing the work. If so, defer the work to the
2238 * currently executing one.
2239 */
2240 collision = find_worker_executing_work(pool, work);
2241 if (unlikely(collision)) {
2242 move_linked_works(work, &collision->scheduled, NULL);
2243 return;
2244 }
2245
2246 /* claim and dequeue */
2247 debug_work_deactivate(work);
2248 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2249 worker->current_work = work;
2250 worker->current_func = work->func;
2251 worker->current_pwq = pwq;
2252 work_data = *work_data_bits(work);
2253 worker->current_color = get_work_color(work_data);
2254
2255 /*
2256 * Record wq name for cmdline and debug reporting, may get
2257 * overridden through set_worker_desc().
2258 */
2259 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2260
2261 list_del_init(&work->entry);
2262
2263 /*
2264 * CPU intensive works don't participate in concurrency management.
2265 * They're the scheduler's responsibility. This takes @worker out
2266 * of concurrency management and the next code block will chain
2267 * execution of the pending work items.
2268 */
2269 if (unlikely(cpu_intensive))
2270 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2271
2272 /*
2273 * Wake up another worker if necessary. The condition is always
2274 * false for normal per-cpu workers since nr_running would always
2275 * be >= 1 at this point. This is used to chain execution of the
2276 * pending work items for WORKER_NOT_RUNNING workers such as the
2277 * UNBOUND and CPU_INTENSIVE ones.
2278 */
2279 if (need_more_worker(pool))
2280 wake_up_worker(pool);
2281
2282 /*
2283 * Record the last pool and clear PENDING which should be the last
2284 * update to @work. Also, do this inside @pool->lock so that
2285 * PENDING and queued state changes happen together while IRQ is
2286 * disabled.
2287 */
2288 set_work_pool_and_clear_pending(work, pool->id);
2289
2290 raw_spin_unlock_irq(&pool->lock);
2291
2292 lock_map_acquire(&pwq->wq->lockdep_map);
2293 lock_map_acquire(&lockdep_map);
2294 /*
2295 * Strictly speaking we should mark the invariant state without holding
2296 * any locks, that is, before these two lock_map_acquire()'s.
2297 *
2298 * However, that would result in:
2299 *
2300 * A(W1)
2301 * WFC(C)
2302 * A(W1)
2303 * C(C)
2304 *
2305 * Which would create W1->C->W1 dependencies, even though there is no
2306 * actual deadlock possible. There are two solutions, using a
2307 * read-recursive acquire on the work(queue) 'locks', but this will then
2308 * hit the lockdep limitation on recursive locks, or simply discard
2309 * these locks.
2310 *
2311 * AFAICT there is no possible deadlock scenario between the
2312 * flush_work() and complete() primitives (except for single-threaded
2313 * workqueues), so hiding them isn't a problem.
2314 */
2315 lockdep_invariant_state(true);
2316 trace_workqueue_execute_start(work);
2317 worker->current_func(work);
2318 /*
2319 * While we must be careful to not use "work" after this, the trace
2320 * point will only record its address.
2321 */
2322 trace_workqueue_execute_end(work, worker->current_func);
2323 lock_map_release(&lockdep_map);
2324 lock_map_release(&pwq->wq->lockdep_map);
2325
2326 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2327 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2328 " last function: %ps\n",
2329 current->comm, preempt_count(), task_pid_nr(current),
2330 worker->current_func);
2331 debug_show_held_locks(current);
2332 dump_stack();
2333 }
2334
2335 /*
2336 * The following prevents a kworker from hogging CPU on !PREEMPTION
2337 * kernels, where a requeueing work item waiting for something to
2338 * happen could deadlock with stop_machine as such work item could
2339 * indefinitely requeue itself while all other CPUs are trapped in
2340 * stop_machine. At the same time, report a quiescent RCU state so
2341 * the same condition doesn't freeze RCU.
2342 */
2343 cond_resched();
2344
2345 raw_spin_lock_irq(&pool->lock);
2346
2347 /* clear cpu intensive status */
2348 if (unlikely(cpu_intensive))
2349 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2350
2351 /* tag the worker for identification in schedule() */
2352 worker->last_func = worker->current_func;
2353
2354 /* we're done with it, release */
2355 hash_del(&worker->hentry);
2356 worker->current_work = NULL;
2357 worker->current_func = NULL;
2358 worker->current_pwq = NULL;
2359 worker->current_color = INT_MAX;
2360 pwq_dec_nr_in_flight(pwq, work_data);
2361 }
2362
2363 /**
2364 * process_scheduled_works - process scheduled works
2365 * @worker: self
2366 *
2367 * Process all scheduled works. Please note that the scheduled list
2368 * may change while processing a work, so this function repeatedly
2369 * fetches a work from the top and executes it.
2370 *
2371 * CONTEXT:
2372 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2373 * multiple times.
2374 */
2375 static void process_scheduled_works(struct worker *worker)
2376 {
2377 while (!list_empty(&worker->scheduled)) {
2378 struct work_struct *work = list_first_entry(&worker->scheduled,
2379 struct work_struct, entry);
2380 process_one_work(worker, work);
2381 }
2382 }
2383
2384 static void set_pf_worker(bool val)
2385 {
2386 mutex_lock(&wq_pool_attach_mutex);
2387 if (val)
2388 current->flags |= PF_WQ_WORKER;
2389 else
2390 current->flags &= ~PF_WQ_WORKER;
2391 mutex_unlock(&wq_pool_attach_mutex);
2392 }
2393
2394 /**
2395 * worker_thread - the worker thread function
2396 * @__worker: self
2397 *
2398 * The worker thread function. All workers belong to a worker_pool -
2399 * either a per-cpu one or dynamic unbound one. These workers process all
2400 * work items regardless of their specific target workqueue. The only
2401 * exception is work items which belong to workqueues with a rescuer which
2402 * will be explained in rescuer_thread().
2403 *
2404 * Return: 0
2405 */
2406 static int worker_thread(void *__worker)
2407 {
2408 struct worker *worker = __worker;
2409 struct worker_pool *pool = worker->pool;
2410
2411 /* tell the scheduler that this is a workqueue worker */
2412 set_pf_worker(true);
2413 woke_up:
2414 raw_spin_lock_irq(&pool->lock);
2415
2416 /* am I supposed to die? */
2417 if (unlikely(worker->flags & WORKER_DIE)) {
2418 raw_spin_unlock_irq(&pool->lock);
2419 WARN_ON_ONCE(!list_empty(&worker->entry));
2420 set_pf_worker(false);
2421
2422 set_task_comm(worker->task, "kworker/dying");
2423 ida_free(&pool->worker_ida, worker->id);
2424 worker_detach_from_pool(worker);
2425 kfree(worker);
2426 return 0;
2427 }
2428
2429 worker_leave_idle(worker);
2430 recheck:
2431 /* no more worker necessary? */
2432 if (!need_more_worker(pool))
2433 goto sleep;
2434
2435 /* do we need to manage? */
2436 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2437 goto recheck;
2438
2439 /*
2440 * ->scheduled list can only be filled while a worker is
2441 * preparing to process a work or actually processing it.
2442 * Make sure nobody diddled with it while I was sleeping.
2443 */
2444 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2445
2446 /*
2447 * Finish PREP stage. We're guaranteed to have at least one idle
2448 * worker or that someone else has already assumed the manager
2449 * role. This is where @worker starts participating in concurrency
2450 * management if applicable and concurrency management is restored
2451 * after being rebound. See rebind_workers() for details.
2452 */
2453 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2454
2455 do {
2456 struct work_struct *work =
2457 list_first_entry(&pool->worklist,
2458 struct work_struct, entry);
2459
2460 pool->watchdog_ts = jiffies;
2461
2462 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2463 /* optimization path, not strictly necessary */
2464 process_one_work(worker, work);
2465 if (unlikely(!list_empty(&worker->scheduled)))
2466 process_scheduled_works(worker);
2467 } else {
2468 move_linked_works(work, &worker->scheduled, NULL);
2469 process_scheduled_works(worker);
2470 }
2471 } while (keep_working(pool));
2472
2473 worker_set_flags(worker, WORKER_PREP);
2474 sleep:
2475 /*
2476 * pool->lock is held and there's no work to process and no need to
2477 * manage, sleep. Workers are woken up only while holding
2478 * pool->lock or from local cpu, so setting the current state
2479 * before releasing pool->lock is enough to prevent losing any
2480 * event.
2481 */
2482 worker_enter_idle(worker);
2483 __set_current_state(TASK_IDLE);
2484 raw_spin_unlock_irq(&pool->lock);
2485 schedule();
2486 goto woke_up;
2487 }
2488
2489 /**
2490 * rescuer_thread - the rescuer thread function
2491 * @__rescuer: self
2492 *
2493 * Workqueue rescuer thread function. There's one rescuer for each
2494 * workqueue which has WQ_MEM_RECLAIM set.
2495 *
2496 * Regular work processing on a pool may block trying to create a new
2497  * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2498  * developing into a deadlock if some works currently on the same queue
2499 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2500 * the problem rescuer solves.
2501 *
2502  * When such a condition is possible, the pool summons the rescuers of all
2503  * workqueues which have works queued on the pool and lets them process
2504  * those works so that forward progress can be guaranteed.
2505 *
2506 * This should happen rarely.
2507 *
2508 * Return: 0
2509 */
2510 static int rescuer_thread(void *__rescuer)
2511 {
2512 struct worker *rescuer = __rescuer;
2513 struct workqueue_struct *wq = rescuer->rescue_wq;
2514 struct list_head *scheduled = &rescuer->scheduled;
2515 bool should_stop;
2516
2517 set_user_nice(current, RESCUER_NICE_LEVEL);
2518
2519 /*
2520 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2521 * doesn't participate in concurrency management.
2522 */
2523 set_pf_worker(true);
2524 repeat:
2525 set_current_state(TASK_IDLE);
2526
2527 /*
2528 * By the time the rescuer is requested to stop, the workqueue
2529 * shouldn't have any work pending, but @wq->maydays may still have
2530 * pwq(s) queued. This can happen by non-rescuer workers consuming
2531 * all the work items before the rescuer got to them. Go through
2532 * @wq->maydays processing before acting on should_stop so that the
2533 * list is always empty on exit.
2534 */
2535 should_stop = kthread_should_stop();
2536
2537 /* see whether any pwq is asking for help */
2538 raw_spin_lock_irq(&wq_mayday_lock);
2539
2540 while (!list_empty(&wq->maydays)) {
2541 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2542 struct pool_workqueue, mayday_node);
2543 struct worker_pool *pool = pwq->pool;
2544 struct work_struct *work, *n;
2545 bool first = true;
2546
2547 __set_current_state(TASK_RUNNING);
2548 list_del_init(&pwq->mayday_node);
2549
2550 raw_spin_unlock_irq(&wq_mayday_lock);
2551
2552 worker_attach_to_pool(rescuer, pool);
2553
2554 raw_spin_lock_irq(&pool->lock);
2555
2556 /*
2557 * Slurp in all works issued via this workqueue and
2558 * process'em.
2559 */
2560 WARN_ON_ONCE(!list_empty(scheduled));
2561 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2562 if (get_work_pwq(work) == pwq) {
2563 if (first)
2564 pool->watchdog_ts = jiffies;
2565 move_linked_works(work, scheduled, &n);
2566 }
2567 first = false;
2568 }
2569
2570 if (!list_empty(scheduled)) {
2571 process_scheduled_works(rescuer);
2572
2573 /*
2574 * The above execution of rescued work items could
2575 * have created more to rescue through
2576 * pwq_activate_first_inactive() or chained
2577 * queueing. Let's put @pwq back on mayday list so
2578 * that such back-to-back work items, which may be
2579 * being used to relieve memory pressure, don't
2580 			 * incur MAYDAY_INTERVAL delay in between.
2581 */
2582 if (pwq->nr_active && need_to_create_worker(pool)) {
2583 raw_spin_lock(&wq_mayday_lock);
2584 /*
2585 * Queue iff we aren't racing destruction
2586 * and somebody else hasn't queued it already.
2587 */
2588 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2589 get_pwq(pwq);
2590 list_add_tail(&pwq->mayday_node, &wq->maydays);
2591 }
2592 raw_spin_unlock(&wq_mayday_lock);
2593 }
2594 }
2595
2596 /*
2597 * Put the reference grabbed by send_mayday(). @pool won't
2598 * go away while we're still attached to it.
2599 */
2600 put_pwq(pwq);
2601
2602 /*
2603 * Leave this pool. If need_more_worker() is %true, notify a
2604 * regular worker; otherwise, we end up with 0 concurrency
2605 * and stalling the execution.
2606 */
2607 if (need_more_worker(pool))
2608 wake_up_worker(pool);
2609
2610 raw_spin_unlock_irq(&pool->lock);
2611
2612 worker_detach_from_pool(rescuer);
2613
2614 raw_spin_lock_irq(&wq_mayday_lock);
2615 }
2616
2617 raw_spin_unlock_irq(&wq_mayday_lock);
2618
2619 if (should_stop) {
2620 __set_current_state(TASK_RUNNING);
2621 set_pf_worker(false);
2622 return 0;
2623 }
2624
2625 /* rescuers should never participate in concurrency management */
2626 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2627 schedule();
2628 goto repeat;
2629 }
2630
2631 /**
2632 * check_flush_dependency - check for flush dependency sanity
2633 * @target_wq: workqueue being flushed
2634 * @target_work: work item being flushed (NULL for workqueue flushes)
2635 *
2636 * %current is trying to flush the whole @target_wq or @target_work on it.
2637 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2638 * reclaiming memory or running on a workqueue which doesn't have
2639 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2640 * a deadlock.
2641 */
2642 static void check_flush_dependency(struct workqueue_struct *target_wq,
2643 struct work_struct *target_work)
2644 {
2645 work_func_t target_func = target_work ? target_work->func : NULL;
2646 struct worker *worker;
2647
2648 if (target_wq->flags & WQ_MEM_RECLAIM)
2649 return;
2650
2651 worker = current_wq_worker();
2652
2653 WARN_ONCE(current->flags & PF_MEMALLOC,
2654 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2655 current->pid, current->comm, target_wq->name, target_func);
2656 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2657 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2658 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2659 worker->current_pwq->wq->name, worker->current_func,
2660 target_wq->name, target_func);
2661 }
2662
2663 struct wq_barrier {
2664 struct work_struct work;
2665 struct completion done;
2666 struct task_struct *task; /* purely informational */
2667 };
2668
2669 static void wq_barrier_func(struct work_struct *work)
2670 {
2671 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2672 complete(&barr->done);
2673 }
2674
2675 /**
2676 * insert_wq_barrier - insert a barrier work
2677 * @pwq: pwq to insert barrier into
2678 * @barr: wq_barrier to insert
2679 * @target: target work to attach @barr to
2680 * @worker: worker currently executing @target, NULL if @target is not executing
2681 *
2682 * @barr is linked to @target such that @barr is completed only after
2683 * @target finishes execution. Please note that the ordering
2684 * guarantee is observed only with respect to @target and on the local
2685 * cpu.
2686 *
2687 * Currently, a queued barrier can't be canceled. This is because
2688 * try_to_grab_pending() can't determine whether the work to be
2689 * grabbed is at the head of the queue and thus can't clear LINKED
2690 * flag of the previous work while there must be a valid next work
2691 * after a work with LINKED flag set.
2692 *
2693 * Note that when @worker is non-NULL, @target may be modified
2694 * underneath us, so we can't reliably determine pwq from @target.
2695 *
2696 * CONTEXT:
2697 * raw_spin_lock_irq(pool->lock).
2698 */
2699 static void insert_wq_barrier(struct pool_workqueue *pwq,
2700 struct wq_barrier *barr,
2701 struct work_struct *target, struct worker *worker)
2702 {
2703 unsigned int work_flags = 0;
2704 unsigned int work_color;
2705 struct list_head *head;
2706
2707 /*
2708 * debugobject calls are safe here even with pool->lock locked
2709 * as we know for sure that this will not trigger any of the
2710 * checks and call back into the fixup functions where we
2711 * might deadlock.
2712 */
2713 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2714 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2715
2716 init_completion_map(&barr->done, &target->lockdep_map);
2717
2718 barr->task = current;
2719
2720 /* The barrier work item does not participate in pwq->nr_active. */
2721 work_flags |= WORK_STRUCT_INACTIVE;
2722
2723 /*
2724 * If @target is currently being executed, schedule the
2725 * barrier to the worker; otherwise, put it after @target.
2726 */
2727 if (worker) {
2728 head = worker->scheduled.next;
2729 work_color = worker->current_color;
2730 } else {
2731 unsigned long *bits = work_data_bits(target);
2732
2733 head = target->entry.next;
2734 /* there can already be other linked works, inherit and set */
2735 work_flags |= *bits & WORK_STRUCT_LINKED;
2736 work_color = get_work_color(*bits);
2737 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2738 }
2739
2740 pwq->nr_in_flight[work_color]++;
2741 work_flags |= work_color_to_flags(work_color);
2742
2743 debug_work_activate(&barr->work);
2744 insert_work(pwq, &barr->work, head, work_flags);
2745 }
2746
2747 /**
2748 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2749 * @wq: workqueue being flushed
2750 * @flush_color: new flush color, < 0 for no-op
2751 * @work_color: new work color, < 0 for no-op
2752 *
2753 * Prepare pwqs for workqueue flushing.
2754 *
2755 * If @flush_color is non-negative, flush_color on all pwqs should be
2756 * -1. If no pwq has in-flight commands at the specified color, all
2757 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2758 * has in flight commands, its pwq->flush_color is set to
2759 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2760 * wakeup logic is armed and %true is returned.
2761 *
2762 * The caller should have initialized @wq->first_flusher prior to
2763 * calling this function with non-negative @flush_color. If
2764 * @flush_color is negative, no flush color update is done and %false
2765 * is returned.
2766 *
2767 * If @work_color is non-negative, all pwqs should have the same
2768 * work_color which is previous to @work_color and all will be
2769 * advanced to @work_color.
2770 *
2771 * CONTEXT:
2772 * mutex_lock(wq->mutex).
2773 *
2774 * Return:
2775 * %true if @flush_color >= 0 and there's something to flush. %false
2776 * otherwise.
2777 */
2778 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2779 int flush_color, int work_color)
2780 {
2781 bool wait = false;
2782 struct pool_workqueue *pwq;
2783
2784 if (flush_color >= 0) {
2785 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2786 atomic_set(&wq->nr_pwqs_to_flush, 1);
2787 }
2788
2789 for_each_pwq(pwq, wq) {
2790 struct worker_pool *pool = pwq->pool;
2791
2792 raw_spin_lock_irq(&pool->lock);
2793
2794 if (flush_color >= 0) {
2795 WARN_ON_ONCE(pwq->flush_color != -1);
2796
2797 if (pwq->nr_in_flight[flush_color]) {
2798 pwq->flush_color = flush_color;
2799 atomic_inc(&wq->nr_pwqs_to_flush);
2800 wait = true;
2801 }
2802 }
2803
2804 if (work_color >= 0) {
2805 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2806 pwq->work_color = work_color;
2807 }
2808
2809 raw_spin_unlock_irq(&pool->lock);
2810 }
2811
2812 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2813 complete(&wq->first_flusher->done);
2814
2815 return wait;
2816 }
2817
2818 /**
2819 * flush_workqueue - ensure that any scheduled work has run to completion.
2820 * @wq: workqueue to flush
2821 *
2822 * This function sleeps until all work items which were queued on entry
2823 * have finished execution, but it is not livelocked by new incoming ones.
2824 */
2825 void flush_workqueue(struct workqueue_struct *wq)
2826 {
2827 struct wq_flusher this_flusher = {
2828 .list = LIST_HEAD_INIT(this_flusher.list),
2829 .flush_color = -1,
2830 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2831 };
2832 int next_color;
2833
2834 if (WARN_ON(!wq_online))
2835 return;
2836
2837 lock_map_acquire(&wq->lockdep_map);
2838 lock_map_release(&wq->lockdep_map);
2839
2840 mutex_lock(&wq->mutex);
2841
2842 /*
2843 * Start-to-wait phase
2844 */
2845 next_color = work_next_color(wq->work_color);
2846
2847 if (next_color != wq->flush_color) {
2848 /*
2849 * Color space is not full. The current work_color
2850 * becomes our flush_color and work_color is advanced
2851 * by one.
2852 */
2853 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2854 this_flusher.flush_color = wq->work_color;
2855 wq->work_color = next_color;
2856
2857 if (!wq->first_flusher) {
2858 /* no flush in progress, become the first flusher */
2859 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2860
2861 wq->first_flusher = &this_flusher;
2862
2863 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2864 wq->work_color)) {
2865 /* nothing to flush, done */
2866 wq->flush_color = next_color;
2867 wq->first_flusher = NULL;
2868 goto out_unlock;
2869 }
2870 } else {
2871 /* wait in queue */
2872 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2873 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2874 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2875 }
2876 } else {
2877 /*
2878 * Oops, color space is full, wait on overflow queue.
2879 * The next flush completion will assign us
2880 * flush_color and transfer to flusher_queue.
2881 */
2882 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2883 }
2884
2885 check_flush_dependency(wq, NULL);
2886
2887 mutex_unlock(&wq->mutex);
2888
2889 wait_for_completion(&this_flusher.done);
2890
2891 /*
2892 * Wake-up-and-cascade phase
2893 *
2894 * First flushers are responsible for cascading flushes and
2895 * handling overflow. Non-first flushers can simply return.
2896 */
2897 if (READ_ONCE(wq->first_flusher) != &this_flusher)
2898 return;
2899
2900 mutex_lock(&wq->mutex);
2901
2902 /* we might have raced, check again with mutex held */
2903 if (wq->first_flusher != &this_flusher)
2904 goto out_unlock;
2905
2906 WRITE_ONCE(wq->first_flusher, NULL);
2907
2908 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2909 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2910
2911 while (true) {
2912 struct wq_flusher *next, *tmp;
2913
2914 /* complete all the flushers sharing the current flush color */
2915 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2916 if (next->flush_color != wq->flush_color)
2917 break;
2918 list_del_init(&next->list);
2919 complete(&next->done);
2920 }
2921
2922 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2923 wq->flush_color != work_next_color(wq->work_color));
2924
2925 /* this flush_color is finished, advance by one */
2926 wq->flush_color = work_next_color(wq->flush_color);
2927
2928 /* one color has been freed, handle overflow queue */
2929 if (!list_empty(&wq->flusher_overflow)) {
2930 /*
2931 * Assign the same color to all overflowed
2932 * flushers, advance work_color and append to
2933 * flusher_queue. This is the start-to-wait
2934 * phase for these overflowed flushers.
2935 */
2936 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2937 tmp->flush_color = wq->work_color;
2938
2939 wq->work_color = work_next_color(wq->work_color);
2940
2941 list_splice_tail_init(&wq->flusher_overflow,
2942 &wq->flusher_queue);
2943 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2944 }
2945
2946 if (list_empty(&wq->flusher_queue)) {
2947 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2948 break;
2949 }
2950
2951 /*
2952 * Need to flush more colors. Make the next flusher
2953 * the new first flusher and arm pwqs.
2954 */
2955 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2956 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2957
2958 list_del_init(&next->list);
2959 wq->first_flusher = next;
2960
2961 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2962 break;
2963
2964 /*
2965 * Meh... this color is already done, clear first
2966 * flusher and repeat cascading.
2967 */
2968 wq->first_flusher = NULL;
2969 }
2970
2971 out_unlock:
2972 mutex_unlock(&wq->mutex);
2973 }
2974 EXPORT_SYMBOL(flush_workqueue);
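/*
 * Illustrative usage sketch (not in the original file; the my_* names are
 * hypothetical): wait for everything queued so far before tearing down the
 * data those work items operate on.
 *
 *	queue_work(my_wq, &my_work_a);
 *	queue_work(my_wq, &my_work_b);
 *	flush_workqueue(my_wq);
 *	// my_work_a and my_work_b have finished; items queued after the
 *	// flush started are not waited for.
 */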
2975
2976 /**
2977 * drain_workqueue - drain a workqueue
2978 * @wq: workqueue to drain
2979 *
2980 * Wait until the workqueue becomes empty. While draining is in progress,
2981 * only chain queueing is allowed. IOW, only currently pending or running
2982 * work items on @wq can queue further work items on it. @wq is flushed
2983  * repeatedly until it becomes empty. The number of flushes is determined
2984  * by the depth of chaining and should be relatively small. Whine if it
2985 * takes too long.
2986 */
2987 void drain_workqueue(struct workqueue_struct *wq)
2988 {
2989 unsigned int flush_cnt = 0;
2990 struct pool_workqueue *pwq;
2991
2992 /*
2993 	 * __queue_work() needs to test whether there are drainers; it is much
2994 	 * hotter than drain_workqueue() and already looks at @wq->flags.
2995 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2996 */
2997 mutex_lock(&wq->mutex);
2998 if (!wq->nr_drainers++)
2999 wq->flags |= __WQ_DRAINING;
3000 mutex_unlock(&wq->mutex);
3001 reflush:
3002 flush_workqueue(wq);
3003
3004 mutex_lock(&wq->mutex);
3005
3006 for_each_pwq(pwq, wq) {
3007 bool drained;
3008
3009 raw_spin_lock_irq(&pwq->pool->lock);
3010 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
3011 raw_spin_unlock_irq(&pwq->pool->lock);
3012
3013 if (drained)
3014 continue;
3015
3016 if (++flush_cnt == 10 ||
3017 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3018 pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3019 wq->name, __func__, flush_cnt);
3020
3021 mutex_unlock(&wq->mutex);
3022 goto reflush;
3023 }
3024
3025 if (!--wq->nr_drainers)
3026 wq->flags &= ~__WQ_DRAINING;
3027 mutex_unlock(&wq->mutex);
3028 }
3029 EXPORT_SYMBOL_GPL(drain_workqueue);
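/*
 * Illustrative teardown sketch (not in the original file; the my_* names
 * are hypothetical): quiesce self-requeueing work. Note that
 * destroy_workqueue() drains internally, so an explicit drain is mainly
 * useful when the queue must become empty while staying alive.
 *
 *	WRITE_ONCE(my_stop, true);	// tell work functions to stop requeueing
 *	drain_workqueue(my_wq);		// waits out any chained requeues
 */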
3030
3031 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3032 bool from_cancel)
3033 {
3034 struct worker *worker = NULL;
3035 struct worker_pool *pool;
3036 struct pool_workqueue *pwq;
3037
3038 might_sleep();
3039
3040 rcu_read_lock();
3041 pool = get_work_pool(work);
3042 if (!pool) {
3043 rcu_read_unlock();
3044 return false;
3045 }
3046
3047 raw_spin_lock_irq(&pool->lock);
3048 /* see the comment in try_to_grab_pending() with the same code */
3049 pwq = get_work_pwq(work);
3050 if (pwq) {
3051 if (unlikely(pwq->pool != pool))
3052 goto already_gone;
3053 } else {
3054 worker = find_worker_executing_work(pool, work);
3055 if (!worker)
3056 goto already_gone;
3057 pwq = worker->current_pwq;
3058 }
3059
3060 check_flush_dependency(pwq->wq, work);
3061
3062 insert_wq_barrier(pwq, barr, work, worker);
3063 raw_spin_unlock_irq(&pool->lock);
3064
3065 /*
3066 * Force a lock recursion deadlock when using flush_work() inside a
3067 * single-threaded or rescuer equipped workqueue.
3068 *
3069 	 * For single threaded workqueues the deadlock happens when the flushed
3070 	 * work is queued after the work issuing the flush_work(). For rescuer equipped
3071 * workqueues the deadlock happens when the rescuer stalls, blocking
3072 * forward progress.
3073 */
3074 if (!from_cancel &&
3075 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3076 lock_map_acquire(&pwq->wq->lockdep_map);
3077 lock_map_release(&pwq->wq->lockdep_map);
3078 }
3079 rcu_read_unlock();
3080 return true;
3081 already_gone:
3082 raw_spin_unlock_irq(&pool->lock);
3083 rcu_read_unlock();
3084 return false;
3085 }
3086
3087 static bool __flush_work(struct work_struct *work, bool from_cancel)
3088 {
3089 struct wq_barrier barr;
3090
3091 if (WARN_ON(!wq_online))
3092 return false;
3093
3094 if (WARN_ON(!work->func))
3095 return false;
3096
3097 lock_map_acquire(&work->lockdep_map);
3098 lock_map_release(&work->lockdep_map);
3099
3100 if (start_flush_work(work, &barr, from_cancel)) {
3101 wait_for_completion(&barr.done);
3102 destroy_work_on_stack(&barr.work);
3103 return true;
3104 } else {
3105 return false;
3106 }
3107 }
3108
3109 /**
3110 * flush_work - wait for a work to finish executing the last queueing instance
3111 * @work: the work to flush
3112 *
3113 * Wait until @work has finished execution. @work is guaranteed to be idle
3114 * on return if it hasn't been requeued since flush started.
3115 *
3116 * Return:
3117 * %true if flush_work() waited for the work to finish execution,
3118 * %false if it was already idle.
3119 */
3120 bool flush_work(struct work_struct *work)
3121 {
3122 return __flush_work(work, false);
3123 }
3124 EXPORT_SYMBOL_GPL(flush_work);
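/*
 * Illustrative usage sketch (not in the original file; dev->update_work is
 * hypothetical): wait for the last queueing instance of one work item,
 * e.g. before freeing the structure embedding it.
 *
 *	if (flush_work(&dev->update_work))
 *		pr_debug("waited for update_work to finish\n");
 *	// update_work is idle now unless it has been requeued since.
 */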
3125
3126 struct cwt_wait {
3127 wait_queue_entry_t wait;
3128 struct work_struct *work;
3129 };
3130
3131 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3132 {
3133 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3134
3135 if (cwait->work != key)
3136 return 0;
3137 return autoremove_wake_function(wait, mode, sync, key);
3138 }
3139
3140 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3141 {
3142 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3143 unsigned long flags;
3144 int ret;
3145
3146 do {
3147 ret = try_to_grab_pending(work, is_dwork, &flags);
3148 /*
3149 * If someone else is already canceling, wait for it to
3150 * finish. flush_work() doesn't work for PREEMPT_NONE
3151 * because we may get scheduled between @work's completion
3152 * and the other canceling task resuming and clearing
3153 * CANCELING - flush_work() will return false immediately
3154 * as @work is no longer busy, try_to_grab_pending() will
3155 * return -ENOENT as @work is still being canceled and the
3156 * other canceling task won't be able to clear CANCELING as
3157 * we're hogging the CPU.
3158 *
3159 * Let's wait for completion using a waitqueue. As this
3160 * may lead to the thundering herd problem, use a custom
3161 * wake function which matches @work along with exclusive
3162 * wait and wakeup.
3163 */
3164 if (unlikely(ret == -ENOENT)) {
3165 struct cwt_wait cwait;
3166
3167 init_wait(&cwait.wait);
3168 cwait.wait.func = cwt_wakefn;
3169 cwait.work = work;
3170
3171 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3172 TASK_UNINTERRUPTIBLE);
3173 if (work_is_canceling(work))
3174 schedule();
3175 finish_wait(&cancel_waitq, &cwait.wait);
3176 }
3177 } while (unlikely(ret < 0));
3178
3179 /* tell other tasks trying to grab @work to back off */
3180 mark_work_canceling(work);
3181 local_irq_restore(flags);
3182
3183 /*
3184 * This allows canceling during early boot. We know that @work
3185 * isn't executing.
3186 */
3187 if (wq_online)
3188 __flush_work(work, true);
3189
3190 clear_work_data(work);
3191
3192 /*
3193 * Paired with prepare_to_wait() above so that either
3194 * waitqueue_active() is visible here or !work_is_canceling() is
3195 * visible there.
3196 */
3197 smp_mb();
3198 if (waitqueue_active(&cancel_waitq))
3199 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3200
3201 return ret;
3202 }
3203
3204 /**
3205 * cancel_work_sync - cancel a work and wait for it to finish
3206 * @work: the work to cancel
3207 *
3208 * Cancel @work and wait for its execution to finish. This function
3209 * can be used even if the work re-queues itself or migrates to
3210 * another workqueue. On return from this function, @work is
3211 * guaranteed to be not pending or executing on any CPU.
3212 *
3213  * cancel_work_sync(&delayed_work->work) must not be used for
3214  * delayed_works. Use cancel_delayed_work_sync() instead.
3215 *
3216 * The caller must ensure that the workqueue on which @work was last
3217 * queued can't be destroyed before this function returns.
3218 *
3219 * Return:
3220 * %true if @work was pending, %false otherwise.
3221 */
3222 bool cancel_work_sync(struct work_struct *work)
3223 {
3224 return __cancel_work_timer(work, false);
3225 }
3226 EXPORT_SYMBOL_GPL(cancel_work_sync);
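/*
 * Illustrative teardown sketch (not in the original file; struct my_dev
 * and its event_work member are hypothetical): the usual pattern before
 * freeing an object that embeds a work item.
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->event_work);	// not pending, not running
 *		kfree(dev);
 *	}
 */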
3227
3228 /**
3229 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3230 * @dwork: the delayed work to flush
3231 *
3232 * Delayed timer is cancelled and the pending work is queued for
3233 * immediate execution. Like flush_work(), this function only
3234 * considers the last queueing instance of @dwork.
3235 *
3236 * Return:
3237 * %true if flush_work() waited for the work to finish execution,
3238 * %false if it was already idle.
3239 */
3240 bool flush_delayed_work(struct delayed_work *dwork)
3241 {
3242 local_irq_disable();
3243 if (del_timer_sync(&dwork->timer))
3244 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3245 local_irq_enable();
3246 return flush_work(&dwork->work);
3247 }
3248 EXPORT_SYMBOL(flush_delayed_work);
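/*
 * Illustrative usage sketch (not in the original file; dev->writeback_work
 * is hypothetical): force a deferred writeback to run now instead of
 * waiting for its timer, and wait for it to complete.
 *
 *	queue_delayed_work(system_wq, &dev->writeback_work, 10 * HZ);
 *	// ... later, e.g. on an explicit sync request ...
 *	flush_delayed_work(&dev->writeback_work);
 */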
3249
3250 /**
3251 * flush_rcu_work - wait for a rwork to finish executing the last queueing
3252 * @rwork: the rcu work to flush
3253 *
3254 * Return:
3255 * %true if flush_rcu_work() waited for the work to finish execution,
3256 * %false if it was already idle.
3257 */
3258 bool flush_rcu_work(struct rcu_work *rwork)
3259 {
3260 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3261 rcu_barrier();
3262 flush_work(&rwork->work);
3263 return true;
3264 } else {
3265 return flush_work(&rwork->work);
3266 }
3267 }
3268 EXPORT_SYMBOL(flush_rcu_work);
3269
3270 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3271 {
3272 unsigned long flags;
3273 int ret;
3274
3275 do {
3276 ret = try_to_grab_pending(work, is_dwork, &flags);
3277 } while (unlikely(ret == -EAGAIN));
3278
3279 if (unlikely(ret < 0))
3280 return false;
3281
3282 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3283 local_irq_restore(flags);
3284 return ret;
3285 }
3286
3287 /*
3288 * See cancel_delayed_work()
3289 */
3290 bool cancel_work(struct work_struct *work)
3291 {
3292 return __cancel_work(work, false);
3293 }
3294 EXPORT_SYMBOL(cancel_work);
3295
3296 /**
3297 * cancel_delayed_work - cancel a delayed work
3298 * @dwork: delayed_work to cancel
3299 *
3300 * Kill off a pending delayed_work.
3301 *
3302 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3303 * pending.
3304 *
3305 * Note:
3306 * The work callback function may still be running on return, unless
3307 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3308 * use cancel_delayed_work_sync() to wait on it.
3309 *
3310 * This function is safe to call from any context including IRQ handler.
3311 */
3312 bool cancel_delayed_work(struct delayed_work *dwork)
3313 {
3314 return __cancel_work(&dwork->work, true);
3315 }
3316 EXPORT_SYMBOL(cancel_delayed_work);
3317
3318 /**
3319 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3320 * @dwork: the delayed work to cancel
3321 *
3322 * This is cancel_work_sync() for delayed works.
3323 *
3324 * Return:
3325 * %true if @dwork was pending, %false otherwise.
3326 */
3327 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3328 {
3329 return __cancel_work_timer(&dwork->work, true);
3330 }
3331 EXPORT_SYMBOL(cancel_delayed_work_sync);
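
/*
 * Illustrative sketch, hypothetical identifiers: a self-rearming poll loop
 * is typically stopped with cancel_delayed_work_sync(), which also copes
 * with the work re-queueing itself while being cancelled.
 *
 *	static void poll_workfn(struct work_struct *work)
 *	{
 *		sample_hardware();
 *		schedule_delayed_work(&poll_dwork, msecs_to_jiffies(100));
 *	}
 *
 *	static void stop_polling(void)
 *	{
 *		cancel_delayed_work_sync(&poll_dwork);
 *	}
 */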
3332
3333 /**
3334 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3335 * @func: the function to call
3336 *
3337 * schedule_on_each_cpu() executes @func on each online CPU using the
3338 * system workqueue and blocks until all CPUs have completed.
3339 * schedule_on_each_cpu() is very slow.
3340 *
3341 * Return:
3342 * 0 on success, -errno on failure.
3343 */
3344 int schedule_on_each_cpu(work_func_t func)
3345 {
3346 int cpu;
3347 struct work_struct __percpu *works;
3348
3349 works = alloc_percpu(struct work_struct);
3350 if (!works)
3351 return -ENOMEM;
3352
3353 cpus_read_lock();
3354
3355 for_each_online_cpu(cpu) {
3356 struct work_struct *work = per_cpu_ptr(works, cpu);
3357
3358 INIT_WORK(work, func);
3359 schedule_work_on(cpu, work);
3360 }
3361
3362 for_each_online_cpu(cpu)
3363 flush_work(per_cpu_ptr(works, cpu));
3364
3365 cpus_read_unlock();
3366 free_percpu(works);
3367 return 0;
3368 }
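
/*
 * Illustrative sketch, hypothetical identifiers: draining a per-cpu cache
 * on every online CPU synchronously.
 *
 *	static void drain_cpu_cache(struct work_struct *dummy)
 *	{
 *		drain_local_cache(this_cpu_ptr(&cpu_cache));
 *	}
 *
 *	ret = schedule_on_each_cpu(drain_cpu_cache);
 */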
3369
3370 /**
3371 * execute_in_process_context - reliably execute the routine with user context
3372 * @fn: the function to execute
3373 * @ew: guaranteed storage for the execute work structure (must
3374 * be available when the work executes)
3375 *
3376 * Executes the function immediately if process context is available,
3377 * otherwise schedules the function for delayed execution.
3378 *
3379 * Return: 0 - function was executed
3380 * 1 - function was scheduled for execution
3381 */
3382 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3383 {
3384 if (!in_interrupt()) {
3385 fn(&ew->work);
3386 return 0;
3387 }
3388
3389 INIT_WORK(&ew->work, fn);
3390 schedule_work(&ew->work);
3391
3392 return 1;
3393 }
3394 EXPORT_SYMBOL_GPL(execute_in_process_context);
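
/*
 * Illustrative sketch, hypothetical identifiers: a release path which may
 * be reached from interrupt context can use execute_in_process_context()
 * so that the heavy part always runs with process context. @ew must stay
 * valid until the deferred work has executed, e.g. by embedding it in the
 * object being released.
 *
 *	static void release_obj(struct my_obj *obj)
 *	{
 *		execute_in_process_context(release_obj_workfn, &obj->ew);
 *	}
 */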
3395
3396 /**
3397 * free_workqueue_attrs - free a workqueue_attrs
3398 * @attrs: workqueue_attrs to free
3399 *
3400 * Undo alloc_workqueue_attrs().
3401 */
3402 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3403 {
3404 if (attrs) {
3405 free_cpumask_var(attrs->cpumask);
3406 kfree(attrs);
3407 }
3408 }
3409
3410 /**
3411 * alloc_workqueue_attrs - allocate a workqueue_attrs
3412 *
3413 * Allocate a new workqueue_attrs, initialize with default settings and
3414 * return it.
3415 *
3416 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3417 */
3418 struct workqueue_attrs *alloc_workqueue_attrs(void)
3419 {
3420 struct workqueue_attrs *attrs;
3421
3422 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3423 if (!attrs)
3424 goto fail;
3425 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3426 goto fail;
3427
3428 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3429 return attrs;
3430 fail:
3431 free_workqueue_attrs(attrs);
3432 return NULL;
3433 }
3434
3435 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3436 const struct workqueue_attrs *from)
3437 {
3438 to->nice = from->nice;
3439 cpumask_copy(to->cpumask, from->cpumask);
3440 /*
3441 * Unlike hash and equality test, this function doesn't ignore
3442 * ->no_numa as it is used for both pool and wq attrs. Instead,
3443 * get_unbound_pool() explicitly clears ->no_numa after copying.
3444 */
3445 to->no_numa = from->no_numa;
3446 }
3447
3448 /* hash value of the content of @attr */
3449 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3450 {
3451 u32 hash = 0;
3452
3453 hash = jhash_1word(attrs->nice, hash);
3454 hash = jhash(cpumask_bits(attrs->cpumask),
3455 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3456 return hash;
3457 }
3458
3459 /* content equality test */
3460 static bool wqattrs_equal(const struct workqueue_attrs *a,
3461 const struct workqueue_attrs *b)
3462 {
3463 if (a->nice != b->nice)
3464 return false;
3465 if (!cpumask_equal(a->cpumask, b->cpumask))
3466 return false;
3467 return true;
3468 }
3469
3470 /**
3471 * init_worker_pool - initialize a newly zalloc'd worker_pool
3472 * @pool: worker_pool to initialize
3473 *
3474 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3475 *
3476 * Return: 0 on success, -errno on failure. Even on failure, all fields
3477 * inside @pool proper are initialized and put_unbound_pool() can be called
3478 * on @pool safely to release it.
3479 */
3480 static int init_worker_pool(struct worker_pool *pool)
3481 {
3482 raw_spin_lock_init(&pool->lock);
3483 pool->id = -1;
3484 pool->cpu = -1;
3485 pool->node = NUMA_NO_NODE;
3486 pool->flags |= POOL_DISASSOCIATED;
3487 pool->watchdog_ts = jiffies;
3488 INIT_LIST_HEAD(&pool->worklist);
3489 INIT_LIST_HEAD(&pool->idle_list);
3490 hash_init(pool->busy_hash);
3491
3492 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3493
3494 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3495
3496 INIT_LIST_HEAD(&pool->workers);
3497
3498 ida_init(&pool->worker_ida);
3499 INIT_HLIST_NODE(&pool->hash_node);
3500 pool->refcnt = 1;
3501
3502 /* shouldn't fail above this point */
3503 pool->attrs = alloc_workqueue_attrs();
3504 if (!pool->attrs)
3505 return -ENOMEM;
3506 return 0;
3507 }
3508
3509 #ifdef CONFIG_LOCKDEP
3510 static void wq_init_lockdep(struct workqueue_struct *wq)
3511 {
3512 char *lock_name;
3513
3514 lockdep_register_key(&wq->key);
3515 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3516 if (!lock_name)
3517 lock_name = wq->name;
3518
3519 wq->lock_name = lock_name;
3520 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3521 }
3522
3523 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3524 {
3525 lockdep_unregister_key(&wq->key);
3526 }
3527
3528 static void wq_free_lockdep(struct workqueue_struct *wq)
3529 {
3530 if (wq->lock_name != wq->name)
3531 kfree(wq->lock_name);
3532 }
3533 #else
3534 static void wq_init_lockdep(struct workqueue_struct *wq)
3535 {
3536 }
3537
3538 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3539 {
3540 }
3541
3542 static void wq_free_lockdep(struct workqueue_struct *wq)
3543 {
3544 }
3545 #endif
3546
3547 static void rcu_free_wq(struct rcu_head *rcu)
3548 {
3549 struct workqueue_struct *wq =
3550 container_of(rcu, struct workqueue_struct, rcu);
3551
3552 wq_free_lockdep(wq);
3553
3554 if (!(wq->flags & WQ_UNBOUND))
3555 free_percpu(wq->cpu_pwqs);
3556 else
3557 free_workqueue_attrs(wq->unbound_attrs);
3558
3559 kfree(wq);
3560 }
3561
3562 static void rcu_free_pool(struct rcu_head *rcu)
3563 {
3564 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3565
3566 ida_destroy(&pool->worker_ida);
3567 free_workqueue_attrs(pool->attrs);
3568 kfree(pool);
3569 }
3570
3571 /* This returns with the lock held on success (pool manager is inactive). */
3572 static bool wq_manager_inactive(struct worker_pool *pool)
3573 {
3574 raw_spin_lock_irq(&pool->lock);
3575
3576 if (pool->flags & POOL_MANAGER_ACTIVE) {
3577 raw_spin_unlock_irq(&pool->lock);
3578 return false;
3579 }
3580 return true;
3581 }
3582
3583 /**
3584 * put_unbound_pool - put a worker_pool
3585 * @pool: worker_pool to put
3586 *
3587 * Put @pool. If its refcnt reaches zero, it gets destroyed in an RCU-
3588 * safe manner. get_unbound_pool() calls this function on its failure path
3589 * and this function should be able to release pools which went through,
3590 * successfully or not, init_worker_pool().
3591 *
3592 * Should be called with wq_pool_mutex held.
3593 */
3594 static void put_unbound_pool(struct worker_pool *pool)
3595 {
3596 DECLARE_COMPLETION_ONSTACK(detach_completion);
3597 struct worker *worker;
3598
3599 lockdep_assert_held(&wq_pool_mutex);
3600
3601 if (--pool->refcnt)
3602 return;
3603
3604 /* sanity checks */
3605 if (WARN_ON(!(pool->cpu < 0)) ||
3606 WARN_ON(!list_empty(&pool->worklist)))
3607 return;
3608
3609 /* release id and unhash */
3610 if (pool->id >= 0)
3611 idr_remove(&worker_pool_idr, pool->id);
3612 hash_del(&pool->hash_node);
3613
3614 /*
3615 * Become the manager and destroy all workers. This prevents
3616 * @pool's workers from blocking on attach_mutex. We're the last
3617 * manager and @pool gets freed with the flag set.
3618 * Because of how wq_manager_inactive() works, we will hold the
3619 * spinlock after a successful wait.
3620 */
3621 rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3622 TASK_UNINTERRUPTIBLE);
3623 pool->flags |= POOL_MANAGER_ACTIVE;
3624
3625 while ((worker = first_idle_worker(pool)))
3626 destroy_worker(worker);
3627 WARN_ON(pool->nr_workers || pool->nr_idle);
3628 raw_spin_unlock_irq(&pool->lock);
3629
3630 mutex_lock(&wq_pool_attach_mutex);
3631 if (!list_empty(&pool->workers))
3632 pool->detach_completion = &detach_completion;
3633 mutex_unlock(&wq_pool_attach_mutex);
3634
3635 if (pool->detach_completion)
3636 wait_for_completion(pool->detach_completion);
3637
3638 /* shut down the timers */
3639 del_timer_sync(&pool->idle_timer);
3640 del_timer_sync(&pool->mayday_timer);
3641
3642 /* RCU protected to allow dereferences from get_work_pool() */
3643 call_rcu(&pool->rcu, rcu_free_pool);
3644 }
3645
3646 /**
3647 * get_unbound_pool - get a worker_pool with the specified attributes
3648 * @attrs: the attributes of the worker_pool to get
3649 *
3650 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3651 * reference count and return it. If there already is a matching
3652 * worker_pool, it will be used; otherwise, this function attempts to
3653 * create a new one.
3654 *
3655 * Should be called with wq_pool_mutex held.
3656 *
3657 * Return: On success, a worker_pool with the same attributes as @attrs.
3658 * On failure, %NULL.
3659 */
3660 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3661 {
3662 u32 hash = wqattrs_hash(attrs);
3663 struct worker_pool *pool;
3664 int node;
3665 int target_node = NUMA_NO_NODE;
3666
3667 lockdep_assert_held(&wq_pool_mutex);
3668
3669 /* do we already have a matching pool? */
3670 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3671 if (wqattrs_equal(pool->attrs, attrs)) {
3672 pool->refcnt++;
3673 return pool;
3674 }
3675 }
3676
3677 /* if cpumask is contained inside a NUMA node, we belong to that node */
3678 if (wq_numa_enabled) {
3679 for_each_node(node) {
3680 if (cpumask_subset(attrs->cpumask,
3681 wq_numa_possible_cpumask[node])) {
3682 target_node = node;
3683 break;
3684 }
3685 }
3686 }
3687
3688 /* nope, create a new one */
3689 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3690 if (!pool || init_worker_pool(pool) < 0)
3691 goto fail;
3692
3693 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3694 copy_workqueue_attrs(pool->attrs, attrs);
3695 pool->node = target_node;
3696
3697 /*
3698 * no_numa isn't a worker_pool attribute, always clear it. See
3699 * 'struct workqueue_attrs' comments for detail.
3700 */
3701 pool->attrs->no_numa = false;
3702
3703 if (worker_pool_assign_id(pool) < 0)
3704 goto fail;
3705
3706 /* create and start the initial worker */
3707 if (wq_online && !create_worker(pool))
3708 goto fail;
3709
3710 /* install */
3711 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3712
3713 return pool;
3714 fail:
3715 if (pool)
3716 put_unbound_pool(pool);
3717 return NULL;
3718 }
3719
3720 static void rcu_free_pwq(struct rcu_head *rcu)
3721 {
3722 kmem_cache_free(pwq_cache,
3723 container_of(rcu, struct pool_workqueue, rcu));
3724 }
3725
3726 /*
3727 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3728 * and needs to be destroyed.
3729 */
3730 static void pwq_unbound_release_workfn(struct work_struct *work)
3731 {
3732 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3733 unbound_release_work);
3734 struct workqueue_struct *wq = pwq->wq;
3735 struct worker_pool *pool = pwq->pool;
3736 bool is_last = false;
3737
3738 /*
3739 * when @pwq is not linked, it doesn't hold any reference to the
3740 * @wq, and @wq is invalid to access.
3741 */
3742 if (!list_empty(&pwq->pwqs_node)) {
3743 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3744 return;
3745
3746 mutex_lock(&wq->mutex);
3747 list_del_rcu(&pwq->pwqs_node);
3748 is_last = list_empty(&wq->pwqs);
3749 mutex_unlock(&wq->mutex);
3750 }
3751
3752 mutex_lock(&wq_pool_mutex);
3753 put_unbound_pool(pool);
3754 mutex_unlock(&wq_pool_mutex);
3755
3756 call_rcu(&pwq->rcu, rcu_free_pwq);
3757
3758 /*
3759 * If we're the last pwq going away, @wq is already dead and no one
3760 * is gonna access it anymore. Schedule RCU free.
3761 */
3762 if (is_last) {
3763 wq_unregister_lockdep(wq);
3764 call_rcu(&wq->rcu, rcu_free_wq);
3765 }
3766 }
3767
3768 /**
3769 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3770 * @pwq: target pool_workqueue
3771 *
3772 * If @pwq isn't freezing, set @pwq->max_active to the associated
3773 * workqueue's saved_max_active and activate inactive work items
3774 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3775 */
3776 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3777 {
3778 struct workqueue_struct *wq = pwq->wq;
3779 bool freezable = wq->flags & WQ_FREEZABLE;
3780 unsigned long flags;
3781
3782 /* for @wq->saved_max_active */
3783 lockdep_assert_held(&wq->mutex);
3784
3785 /* fast exit for non-freezable wqs */
3786 if (!freezable && pwq->max_active == wq->saved_max_active)
3787 return;
3788
3789 /* this function can be called during early boot w/ irq disabled */
3790 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3791
3792 /*
3793 * During [un]freezing, the caller is responsible for ensuring that
3794 * this function is called at least once after @workqueue_freezing
3795 * is updated and visible.
3796 */
3797 if (!freezable || !workqueue_freezing) {
3798 bool kick = false;
3799
3800 pwq->max_active = wq->saved_max_active;
3801
3802 while (!list_empty(&pwq->inactive_works) &&
3803 pwq->nr_active < pwq->max_active) {
3804 pwq_activate_first_inactive(pwq);
3805 kick = true;
3806 }
3807
3808 /*
3809 * Need to kick a worker after thawed or an unbound wq's
3810 * max_active is bumped. In realtime scenarios, always kicking a
3811 * worker will cause interference on the isolated cpu cores, so
3812 * let's kick iff work items were activated.
3813 */
3814 if (kick)
3815 wake_up_worker(pwq->pool);
3816 } else {
3817 pwq->max_active = 0;
3818 }
3819
3820 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3821 }
3822
3823 /* initialize newly allocated @pwq which is associated with @wq and @pool */
3824 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3825 struct worker_pool *pool)
3826 {
3827 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3828
3829 memset(pwq, 0, sizeof(*pwq));
3830
3831 pwq->pool = pool;
3832 pwq->wq = wq;
3833 pwq->flush_color = -1;
3834 pwq->refcnt = 1;
3835 INIT_LIST_HEAD(&pwq->inactive_works);
3836 INIT_LIST_HEAD(&pwq->pwqs_node);
3837 INIT_LIST_HEAD(&pwq->mayday_node);
3838 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3839 }
3840
3841 /* sync @pwq with the current state of its associated wq and link it */
3842 static void link_pwq(struct pool_workqueue *pwq)
3843 {
3844 struct workqueue_struct *wq = pwq->wq;
3845
3846 lockdep_assert_held(&wq->mutex);
3847
3848 /* may be called multiple times, ignore if already linked */
3849 if (!list_empty(&pwq->pwqs_node))
3850 return;
3851
3852 /* set the matching work_color */
3853 pwq->work_color = wq->work_color;
3854
3855 /* sync max_active to the current setting */
3856 pwq_adjust_max_active(pwq);
3857
3858 /* link in @pwq */
3859 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3860 }
3861
3862 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3863 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3864 const struct workqueue_attrs *attrs)
3865 {
3866 struct worker_pool *pool;
3867 struct pool_workqueue *pwq;
3868
3869 lockdep_assert_held(&wq_pool_mutex);
3870
3871 pool = get_unbound_pool(attrs);
3872 if (!pool)
3873 return NULL;
3874
3875 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3876 if (!pwq) {
3877 put_unbound_pool(pool);
3878 return NULL;
3879 }
3880
3881 init_pwq(pwq, wq, pool);
3882 return pwq;
3883 }
3884
3885 /**
3886 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3887 * @attrs: the wq_attrs of the default pwq of the target workqueue
3888 * @node: the target NUMA node
3889 * @cpu_going_down: if >= 0, the CPU to consider as offline
3890 * @cpumask: outarg, the resulting cpumask
3891 *
3892 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3893 * @cpu_going_down is >= 0, that cpu is considered offline during
3894 * calculation. The result is stored in @cpumask.
3895 *
3896 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3897 * enabled and @node has online CPUs requested by @attrs, the returned
3898 * cpumask is the intersection of the possible CPUs of @node and
3899 * @attrs->cpumask.
3900 *
3901 * The caller is responsible for ensuring that the cpumask of @node stays
3902 * stable.
3903 *
3904 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3905 * %false if equal.
3906 */
3907 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3908 int cpu_going_down, cpumask_t *cpumask)
3909 {
3910 if (!wq_numa_enabled || attrs->no_numa)
3911 goto use_dfl;
3912
3913 /* does @node have any online CPUs @attrs wants? */
3914 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3915 if (cpu_going_down >= 0)
3916 cpumask_clear_cpu(cpu_going_down, cpumask);
3917
3918 if (cpumask_empty(cpumask))
3919 goto use_dfl;
3920
3921 /* yeap, return possible CPUs in @node that @attrs wants */
3922 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3923
3924 if (cpumask_empty(cpumask)) {
3925 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3926 "possible intersect\n");
3927 return false;
3928 }
3929
3930 return !cpumask_equal(cpumask, attrs->cpumask);
3931
3932 use_dfl:
3933 cpumask_copy(cpumask, attrs->cpumask);
3934 return false;
3935 }
3936
3937 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3938 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3939 int node,
3940 struct pool_workqueue *pwq)
3941 {
3942 struct pool_workqueue *old_pwq;
3943
3944 lockdep_assert_held(&wq_pool_mutex);
3945 lockdep_assert_held(&wq->mutex);
3946
3947 /* link_pwq() can handle duplicate calls */
3948 link_pwq(pwq);
3949
3950 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3951 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3952 return old_pwq;
3953 }
3954
3955 /* context to store the prepared attrs & pwqs before applying */
3956 struct apply_wqattrs_ctx {
3957 struct workqueue_struct *wq; /* target workqueue */
3958 struct workqueue_attrs *attrs; /* attrs to apply */
3959 struct list_head list; /* queued for batching commit */
3960 struct pool_workqueue *dfl_pwq;
3961 struct pool_workqueue *pwq_tbl[];
3962 };
3963
3964 /* free the resources after success or abort */
3965 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3966 {
3967 if (ctx) {
3968 int node;
3969
3970 for_each_node(node)
3971 put_pwq_unlocked(ctx->pwq_tbl[node]);
3972 put_pwq_unlocked(ctx->dfl_pwq);
3973
3974 free_workqueue_attrs(ctx->attrs);
3975
3976 kfree(ctx);
3977 }
3978 }
3979
3980 /* allocate the attrs and pwqs for later installation */
3981 static struct apply_wqattrs_ctx *
3982 apply_wqattrs_prepare(struct workqueue_struct *wq,
3983 const struct workqueue_attrs *attrs)
3984 {
3985 struct apply_wqattrs_ctx *ctx;
3986 struct workqueue_attrs *new_attrs, *tmp_attrs;
3987 int node;
3988
3989 lockdep_assert_held(&wq_pool_mutex);
3990
3991 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3992
3993 new_attrs = alloc_workqueue_attrs();
3994 tmp_attrs = alloc_workqueue_attrs();
3995 if (!ctx || !new_attrs || !tmp_attrs)
3996 goto out_free;
3997
3998 /*
3999 * Calculate the attrs of the default pwq.
4000 * If the user configured cpumask doesn't overlap with the
4001 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
4002 */
4003 copy_workqueue_attrs(new_attrs, attrs);
4004 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
4005 if (unlikely(cpumask_empty(new_attrs->cpumask)))
4006 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
4007
4008 /*
4009 * We may create multiple pwqs with differing cpumasks. Make a
4010 * copy of @new_attrs which will be modified and used to obtain
4011 * pools.
4012 */
4013 copy_workqueue_attrs(tmp_attrs, new_attrs);
4014
4015 /*
4016 * If something goes wrong during CPU up/down, we'll fall back to
4017 * the default pwq covering whole @attrs->cpumask. Always create
4018 * it even if we don't use it immediately.
4019 */
4020 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4021 if (!ctx->dfl_pwq)
4022 goto out_free;
4023
4024 for_each_node(node) {
4025 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
4026 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
4027 if (!ctx->pwq_tbl[node])
4028 goto out_free;
4029 } else {
4030 ctx->dfl_pwq->refcnt++;
4031 ctx->pwq_tbl[node] = ctx->dfl_pwq;
4032 }
4033 }
4034
4035 /* save the user configured attrs and sanitize it. */
4036 copy_workqueue_attrs(new_attrs, attrs);
4037 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4038 ctx->attrs = new_attrs;
4039
4040 ctx->wq = wq;
4041 free_workqueue_attrs(tmp_attrs);
4042 return ctx;
4043
4044 out_free:
4045 free_workqueue_attrs(tmp_attrs);
4046 free_workqueue_attrs(new_attrs);
4047 apply_wqattrs_cleanup(ctx);
4048 return NULL;
4049 }
4050
4051 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4052 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4053 {
4054 int node;
4055
4056 /* all pwqs have been created successfully, let's install'em */
4057 mutex_lock(&ctx->wq->mutex);
4058
4059 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4060
4061 /* save the previous pwq and install the new one */
4062 for_each_node(node)
4063 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4064 ctx->pwq_tbl[node]);
4065
4066 /* @dfl_pwq might not have been used, ensure it's linked */
4067 link_pwq(ctx->dfl_pwq);
4068 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4069
4070 mutex_unlock(&ctx->wq->mutex);
4071 }
4072
4073 static void apply_wqattrs_lock(void)
4074 {
4075 /* CPUs should stay stable across pwq creations and installations */
4076 cpus_read_lock();
4077 mutex_lock(&wq_pool_mutex);
4078 }
4079
4080 static void apply_wqattrs_unlock(void)
4081 {
4082 mutex_unlock(&wq_pool_mutex);
4083 cpus_read_unlock();
4084 }
4085
4086 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4087 const struct workqueue_attrs *attrs)
4088 {
4089 struct apply_wqattrs_ctx *ctx;
4090
4091 /* only unbound workqueues can change attributes */
4092 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4093 return -EINVAL;
4094
4095 /* creating multiple pwqs breaks ordering guarantee */
4096 if (!list_empty(&wq->pwqs)) {
4097 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4098 return -EINVAL;
4099
4100 wq->flags &= ~__WQ_ORDERED;
4101 }
4102
4103 ctx = apply_wqattrs_prepare(wq, attrs);
4104 if (!ctx)
4105 return -ENOMEM;
4106
4107 /* the ctx has been prepared successfully, let's commit it */
4108 apply_wqattrs_commit(ctx);
4109 apply_wqattrs_cleanup(ctx);
4110
4111 return 0;
4112 }
4113
4114 /**
4115 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4116 * @wq: the target workqueue
4117 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4118 *
4119 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
4120 * machines, this function maps a separate pwq to each NUMA node with
4121 * possible CPUs in @attrs->cpumask so that work items are affine to the
4122 * NUMA node it was issued on. Older pwqs are released as in-flight work
4123 * items finish. Note that a work item which repeatedly requeues itself
4124 * back-to-back will stay on its current pwq.
4125 *
4126 * Performs GFP_KERNEL allocations.
4127 *
4128 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4129 *
4130 * Return: 0 on success and -errno on failure.
4131 */
4132 int apply_workqueue_attrs(struct workqueue_struct *wq,
4133 const struct workqueue_attrs *attrs)
4134 {
4135 int ret;
4136
4137 lockdep_assert_cpus_held();
4138
4139 mutex_lock(&wq_pool_mutex);
4140 ret = apply_workqueue_attrs_locked(wq, attrs);
4141 mutex_unlock(&wq_pool_mutex);
4142
4143 return ret;
4144 }
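
/*
 * Illustrative sketch, hypothetical identifiers: restricting an unbound
 * workqueue to a subset of CPUs with a non-default nice level. As per the
 * comment above, the caller holds cpus_read_lock() around the call.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, housekeeping_mask);
 *		cpus_read_lock();
 *		ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *		cpus_read_unlock();
 *		free_workqueue_attrs(attrs);
 *	}
 */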
4145
4146 /**
4147 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4148 * @wq: the target workqueue
4149 * @cpu: the CPU coming up or going down
4150 * @online: whether @cpu is coming up or going down
4151 *
4152 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4153 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
4154 * @wq accordingly.
4155 *
4156 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4157 * falls back to @wq->dfl_pwq which may not be optimal but is always
4158 * correct.
4159 *
4160 * Note that when the last allowed CPU of a NUMA node goes offline for a
4161 * workqueue with a cpumask spanning multiple nodes, the workers which were
4162 * already executing the work items for the workqueue will lose their CPU
4163 * affinity and may execute on any CPU. This is similar to how per-cpu
4164 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
4165 * affinity, it's the user's responsibility to flush the work item from
4166 * CPU_DOWN_PREPARE.
4167 */
4168 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4169 bool online)
4170 {
4171 int node = cpu_to_node(cpu);
4172 int cpu_off = online ? -1 : cpu;
4173 struct pool_workqueue *old_pwq = NULL, *pwq;
4174 struct workqueue_attrs *target_attrs;
4175 cpumask_t *cpumask;
4176
4177 lockdep_assert_held(&wq_pool_mutex);
4178
4179 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4180 wq->unbound_attrs->no_numa)
4181 return;
4182
4183 /*
4184 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4185 * Let's use a preallocated one. The following buf is protected by
4186 * CPU hotplug exclusion.
4187 */
4188 target_attrs = wq_update_unbound_numa_attrs_buf;
4189 cpumask = target_attrs->cpumask;
4190
4191 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4192 pwq = unbound_pwq_by_node(wq, node);
4193
4194 /*
4195 * Let's determine what needs to be done. If the target cpumask is
4196 * different from the default pwq's, we need to compare it to @pwq's
4197 * and create a new one if they don't match. If the target cpumask
4198 * equals the default pwq's, the default pwq should be used.
4199 */
4200 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4201 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4202 return;
4203 } else {
4204 goto use_dfl_pwq;
4205 }
4206
4207 /* create a new pwq */
4208 pwq = alloc_unbound_pwq(wq, target_attrs);
4209 if (!pwq) {
4210 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4211 wq->name);
4212 goto use_dfl_pwq;
4213 }
4214
4215 /* Install the new pwq. */
4216 mutex_lock(&wq->mutex);
4217 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4218 goto out_unlock;
4219
4220 use_dfl_pwq:
4221 mutex_lock(&wq->mutex);
4222 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4223 get_pwq(wq->dfl_pwq);
4224 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4225 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4226 out_unlock:
4227 mutex_unlock(&wq->mutex);
4228 put_pwq_unlocked(old_pwq);
4229 }
4230
4231 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4232 {
4233 bool highpri = wq->flags & WQ_HIGHPRI;
4234 int cpu, ret;
4235
4236 if (!(wq->flags & WQ_UNBOUND)) {
4237 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4238 if (!wq->cpu_pwqs)
4239 return -ENOMEM;
4240
4241 for_each_possible_cpu(cpu) {
4242 struct pool_workqueue *pwq =
4243 per_cpu_ptr(wq->cpu_pwqs, cpu);
4244 struct worker_pool *cpu_pools =
4245 per_cpu(cpu_worker_pools, cpu);
4246
4247 init_pwq(pwq, wq, &cpu_pools[highpri]);
4248
4249 mutex_lock(&wq->mutex);
4250 link_pwq(pwq);
4251 mutex_unlock(&wq->mutex);
4252 }
4253 return 0;
4254 }
4255
4256 cpus_read_lock();
4257 if (wq->flags & __WQ_ORDERED) {
4258 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4259 /* there should only be single pwq for ordering guarantee */
4260 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4261 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4262 "ordering guarantee broken for workqueue %s\n", wq->name);
4263 } else {
4264 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4265 }
4266 cpus_read_unlock();
4267
4268 return ret;
4269 }
4270
4271 static int wq_clamp_max_active(int max_active, unsigned int flags,
4272 const char *name)
4273 {
4274 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4275
4276 if (max_active < 1 || max_active > lim)
4277 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4278 max_active, name, 1, lim);
4279
4280 return clamp_val(max_active, 1, lim);
4281 }
4282
4283 /*
4284 * Workqueues which may be used during memory reclaim should have a rescuer
4285 * to guarantee forward progress.
4286 */
4287 static int init_rescuer(struct workqueue_struct *wq)
4288 {
4289 struct worker *rescuer;
4290 int ret;
4291
4292 if (!(wq->flags & WQ_MEM_RECLAIM))
4293 return 0;
4294
4295 rescuer = alloc_worker(NUMA_NO_NODE);
4296 if (!rescuer)
4297 return -ENOMEM;
4298
4299 rescuer->rescue_wq = wq;
4300 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4301 if (IS_ERR(rescuer->task)) {
4302 ret = PTR_ERR(rescuer->task);
4303 kfree(rescuer);
4304 return ret;
4305 }
4306
4307 wq->rescuer = rescuer;
4308 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4309 wake_up_process(rescuer->task);
4310
4311 return 0;
4312 }
4313
4314 __printf(1, 4)
4315 struct workqueue_struct *alloc_workqueue(const char *fmt,
4316 unsigned int flags,
4317 int max_active, ...)
4318 {
4319 size_t tbl_size = 0;
4320 va_list args;
4321 struct workqueue_struct *wq;
4322 struct pool_workqueue *pwq;
4323
4324 /*
4325 * Unbound && max_active == 1 used to imply ordered, which is no
4326 * longer the case on NUMA machines due to per-node pools. While
4327 * alloc_ordered_workqueue() is the right way to create an ordered
4328 * workqueue, keep the previous behavior to avoid subtle breakages
4329 * on NUMA.
4330 */
4331 if ((flags & WQ_UNBOUND) && max_active == 1)
4332 flags |= __WQ_ORDERED;
4333
4334 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4335 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4336 flags |= WQ_UNBOUND;
4337
4338 /* allocate wq and format name */
4339 if (flags & WQ_UNBOUND)
4340 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4341
4342 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4343 if (!wq)
4344 return NULL;
4345
4346 if (flags & WQ_UNBOUND) {
4347 wq->unbound_attrs = alloc_workqueue_attrs();
4348 if (!wq->unbound_attrs)
4349 goto err_free_wq;
4350 }
4351
4352 va_start(args, max_active);
4353 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4354 va_end(args);
4355
4356 max_active = max_active ?: WQ_DFL_ACTIVE;
4357 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4358
4359 /* init wq */
4360 wq->flags = flags;
4361 wq->saved_max_active = max_active;
4362 mutex_init(&wq->mutex);
4363 atomic_set(&wq->nr_pwqs_to_flush, 0);
4364 INIT_LIST_HEAD(&wq->pwqs);
4365 INIT_LIST_HEAD(&wq->flusher_queue);
4366 INIT_LIST_HEAD(&wq->flusher_overflow);
4367 INIT_LIST_HEAD(&wq->maydays);
4368
4369 wq_init_lockdep(wq);
4370 INIT_LIST_HEAD(&wq->list);
4371
4372 if (alloc_and_link_pwqs(wq) < 0)
4373 goto err_unreg_lockdep;
4374
4375 if (wq_online && init_rescuer(wq) < 0)
4376 goto err_destroy;
4377
4378 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4379 goto err_destroy;
4380
4381 /*
4382 * wq_pool_mutex protects global freeze state and workqueues list.
4383 * Grab it, adjust max_active and add the new @wq to workqueues
4384 * list.
4385 */
4386 mutex_lock(&wq_pool_mutex);
4387
4388 mutex_lock(&wq->mutex);
4389 for_each_pwq(pwq, wq)
4390 pwq_adjust_max_active(pwq);
4391 mutex_unlock(&wq->mutex);
4392
4393 list_add_tail_rcu(&wq->list, &workqueues);
4394
4395 mutex_unlock(&wq_pool_mutex);
4396
4397 return wq;
4398
4399 err_unreg_lockdep:
4400 wq_unregister_lockdep(wq);
4401 wq_free_lockdep(wq);
4402 err_free_wq:
4403 free_workqueue_attrs(wq->unbound_attrs);
4404 kfree(wq);
4405 return NULL;
4406 err_destroy:
4407 destroy_workqueue(wq);
4408 return NULL;
4409 }
4410 EXPORT_SYMBOL_GPL(alloc_workqueue);
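
/*
 * Illustrative sketch, hypothetical names: a driver which must make
 * forward progress under memory pressure allocates its own WQ_MEM_RECLAIM
 * workqueue (which gets a rescuer via init_rescuer() above) and destroys
 * it on unload.
 *
 *	wq = alloc_workqueue("my_drv/%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
 *			     dev_name(dev));
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */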
4411
4412 static bool pwq_busy(struct pool_workqueue *pwq)
4413 {
4414 int i;
4415
4416 for (i = 0; i < WORK_NR_COLORS; i++)
4417 if (pwq->nr_in_flight[i])
4418 return true;
4419
4420 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4421 return true;
4422 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4423 return true;
4424
4425 return false;
4426 }
4427
4428 /**
4429 * destroy_workqueue - safely terminate a workqueue
4430 * @wq: target workqueue
4431 *
4432 * Safely destroy a workqueue. All work currently pending will be done first.
4433 */
4434 void destroy_workqueue(struct workqueue_struct *wq)
4435 {
4436 struct pool_workqueue *pwq;
4437 int node;
4438
4439 /*
4440 * Remove it from sysfs first so that sanity check failure doesn't
4441 * lead to sysfs name conflicts.
4442 */
4443 workqueue_sysfs_unregister(wq);
4444
4445 /* drain it before proceeding with destruction */
4446 drain_workqueue(wq);
4447
4448 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4449 if (wq->rescuer) {
4450 struct worker *rescuer = wq->rescuer;
4451
4452 /* this prevents new queueing */
4453 raw_spin_lock_irq(&wq_mayday_lock);
4454 wq->rescuer = NULL;
4455 raw_spin_unlock_irq(&wq_mayday_lock);
4456
4457 /* rescuer will empty maydays list before exiting */
4458 kthread_stop(rescuer->task);
4459 kfree(rescuer);
4460 }
4461
4462 /*
4463 * Sanity checks - grab all the locks so that we wait for all
4464 * in-flight operations which may do put_pwq().
4465 */
4466 mutex_lock(&wq_pool_mutex);
4467 mutex_lock(&wq->mutex);
4468 for_each_pwq(pwq, wq) {
4469 raw_spin_lock_irq(&pwq->pool->lock);
4470 if (WARN_ON(pwq_busy(pwq))) {
4471 pr_warn("%s: %s has the following busy pwq\n",
4472 __func__, wq->name);
4473 show_pwq(pwq);
4474 raw_spin_unlock_irq(&pwq->pool->lock);
4475 mutex_unlock(&wq->mutex);
4476 mutex_unlock(&wq_pool_mutex);
4477 show_one_workqueue(wq);
4478 return;
4479 }
4480 raw_spin_unlock_irq(&pwq->pool->lock);
4481 }
4482 mutex_unlock(&wq->mutex);
4483
4484 /*
4485 * wq list is used to freeze wq, remove from list after
4486 * flushing is complete in case freeze races us.
4487 */
4488 list_del_rcu(&wq->list);
4489 mutex_unlock(&wq_pool_mutex);
4490
4491 if (!(wq->flags & WQ_UNBOUND)) {
4492 wq_unregister_lockdep(wq);
4493 /*
4494 * The base ref is never dropped on per-cpu pwqs. Directly
4495 * schedule RCU free.
4496 */
4497 call_rcu(&wq->rcu, rcu_free_wq);
4498 } else {
4499 /*
4500 * We're the sole accessor of @wq at this point. Directly
4501 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4502 * @wq will be freed when the last pwq is released.
4503 */
4504 for_each_node(node) {
4505 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4506 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4507 put_pwq_unlocked(pwq);
4508 }
4509
4510 /*
4511 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4512 * put. Don't access it afterwards.
4513 */
4514 pwq = wq->dfl_pwq;
4515 wq->dfl_pwq = NULL;
4516 put_pwq_unlocked(pwq);
4517 }
4518 }
4519 EXPORT_SYMBOL_GPL(destroy_workqueue);
4520
4521 /**
4522 * workqueue_set_max_active - adjust max_active of a workqueue
4523 * @wq: target workqueue
4524 * @max_active: new max_active value.
4525 *
4526 * Set max_active of @wq to @max_active.
4527 *
4528 * CONTEXT:
4529 * Don't call from IRQ context.
4530 */
4531 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4532 {
4533 struct pool_workqueue *pwq;
4534
4535 /* disallow meddling with max_active for ordered workqueues */
4536 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4537 return;
4538
4539 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4540
4541 mutex_lock(&wq->mutex);
4542
4543 wq->flags &= ~__WQ_ORDERED;
4544 wq->saved_max_active = max_active;
4545
4546 for_each_pwq(pwq, wq)
4547 pwq_adjust_max_active(pwq);
4548
4549 mutex_unlock(&wq->mutex);
4550 }
4551 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
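
/*
 * Illustrative sketch, hypothetical identifiers: throttling a workqueue at
 * runtime, e.g. in response to a sysfs knob. The new value is clamped and
 * propagated to each pool_workqueue by the function above.
 *
 *	workqueue_set_max_active(io_wq, user_requested_limit);
 */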
4552
4553 /**
4554 * current_work - retrieve %current task's work struct
4555 *
4556 * Determine if %current task is a workqueue worker and what it's working on.
4557 * Useful to find out the context that the %current task is running in.
4558 *
4559 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4560 */
4561 struct work_struct *current_work(void)
4562 {
4563 struct worker *worker = current_wq_worker();
4564
4565 return worker ? worker->current_work : NULL;
4566 }
4567 EXPORT_SYMBOL(current_work);
4568
4569 /**
4570 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4571 *
4572 * Determine whether %current is a workqueue rescuer. Can be used from
4573 * work functions to determine whether it's being run off the rescuer task.
4574 *
4575 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4576 */
4577 bool current_is_workqueue_rescuer(void)
4578 {
4579 struct worker *worker = current_wq_worker();
4580
4581 return worker && worker->rescue_wq;
4582 }
4583
4584 /**
4585 * workqueue_congested - test whether a workqueue is congested
4586 * @cpu: CPU in question
4587 * @wq: target workqueue
4588 *
4589 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4590 * no synchronization around this function and the test result is
4591 * unreliable and only useful as advisory hints or for debugging.
4592 *
4593 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4594 * Note that both per-cpu and unbound workqueues may be associated with
4595 * multiple pool_workqueues which have separate congested states. A
4596 * workqueue being congested on one CPU doesn't mean the workqueue is also
4597 * congested on other CPUs / NUMA nodes.
4598 *
4599 * Return:
4600 * %true if congested, %false otherwise.
4601 */
4602 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4603 {
4604 struct pool_workqueue *pwq;
4605 bool ret;
4606
4607 rcu_read_lock();
4608 preempt_disable();
4609
4610 if (cpu == WORK_CPU_UNBOUND)
4611 cpu = smp_processor_id();
4612
4613 if (!(wq->flags & WQ_UNBOUND))
4614 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4615 else
4616 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4617
4618 ret = !list_empty(&pwq->inactive_works);
4619 preempt_enable();
4620 rcu_read_unlock();
4621
4622 return ret;
4623 }
4624 EXPORT_SYMBOL_GPL(workqueue_congested);
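
/*
 * Illustrative sketch, hypothetical identifiers: because the result is
 * only advisory, it is typically used to shed optional load rather than
 * for correctness decisions.
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, stats_wq))
 *		queue_work(stats_wq, &optional_stats_work);
 */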
4625
4626 /**
4627 * work_busy - test whether a work is currently pending or running
4628 * @work: the work to be tested
4629 *
4630 * Test whether @work is currently pending or running. There is no
4631 * synchronization around this function and the test result is
4632 * unreliable and only useful as advisory hints or for debugging.
4633 *
4634 * Return:
4635 * OR'd bitmask of WORK_BUSY_* bits.
4636 */
4637 unsigned int work_busy(struct work_struct *work)
4638 {
4639 struct worker_pool *pool;
4640 unsigned long flags;
4641 unsigned int ret = 0;
4642
4643 if (work_pending(work))
4644 ret |= WORK_BUSY_PENDING;
4645
4646 rcu_read_lock();
4647 pool = get_work_pool(work);
4648 if (pool) {
4649 raw_spin_lock_irqsave(&pool->lock, flags);
4650 if (find_worker_executing_work(pool, work))
4651 ret |= WORK_BUSY_RUNNING;
4652 raw_spin_unlock_irqrestore(&pool->lock, flags);
4653 }
4654 rcu_read_unlock();
4655
4656 return ret;
4657 }
4658 EXPORT_SYMBOL_GPL(work_busy);
4659
4660 /**
4661 * set_worker_desc - set description for the current work item
4662 * @fmt: printf-style format string
4663 * @...: arguments for the format string
4664 *
4665 * This function can be called by a running work function to describe what
4666 * the work item is about. If the worker task gets dumped, this
4667 * information will be printed out together to help debugging. The
4668 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4669 */
4670 void set_worker_desc(const char *fmt, ...)
4671 {
4672 struct worker *worker = current_wq_worker();
4673 va_list args;
4674
4675 if (worker) {
4676 va_start(args, fmt);
4677 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4678 va_end(args);
4679 }
4680 }
4681 EXPORT_SYMBOL_GPL(set_worker_desc);
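
/*
 * Illustrative sketch, hypothetical identifiers: a work function handling
 * many similar objects can record which one it is processing so that task
 * dumps (see print_worker_info() below) identify the object.
 *
 *	static void process_req_workfn(struct work_struct *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *
 *		set_worker_desc("req %llu", (unsigned long long)req->id);
 *		handle_request(req);
 *	}
 */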
4682
4683 /**
4684 * print_worker_info - print out worker information and description
4685 * @log_lvl: the log level to use when printing
4686 * @task: target task
4687 *
4688 * If @task is a worker and currently executing a work item, print out the
4689 * name of the workqueue being serviced and worker description set with
4690 * set_worker_desc() by the currently executing work item.
4691 *
4692 * This function can be safely called on any task as long as the
4693 * task_struct itself is accessible. While safe, this function isn't
4694 * synchronized and may print out mixed-up or garbage data of limited length.
4695 */
4696 void print_worker_info(const char *log_lvl, struct task_struct *task)
4697 {
4698 work_func_t *fn = NULL;
4699 char name[WQ_NAME_LEN] = { };
4700 char desc[WORKER_DESC_LEN] = { };
4701 struct pool_workqueue *pwq = NULL;
4702 struct workqueue_struct *wq = NULL;
4703 struct worker *worker;
4704
4705 if (!(task->flags & PF_WQ_WORKER))
4706 return;
4707
4708 /*
4709 * This function is called without any synchronization and @task
4710 * could be in any state. Be careful with dereferences.
4711 */
4712 worker = kthread_probe_data(task);
4713
4714 /*
4715 * Carefully copy the associated workqueue's workfn, name and desc.
4716 * Keep the original last '\0' in case the original is garbage.
4717 */
4718 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4719 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4720 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4721 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4722 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4723
4724 if (fn || name[0] || desc[0]) {
4725 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4726 if (strcmp(name, desc))
4727 pr_cont(" (%s)", desc);
4728 pr_cont("\n");
4729 }
4730 }
4731
4732 static void pr_cont_pool_info(struct worker_pool *pool)
4733 {
4734 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4735 if (pool->node != NUMA_NO_NODE)
4736 pr_cont(" node=%d", pool->node);
4737 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4738 }
4739
4740 static void pr_cont_work(bool comma, struct work_struct *work)
4741 {
4742 if (work->func == wq_barrier_func) {
4743 struct wq_barrier *barr;
4744
4745 barr = container_of(work, struct wq_barrier, work);
4746
4747 pr_cont("%s BAR(%d)", comma ? "," : "",
4748 task_pid_nr(barr->task));
4749 } else {
4750 pr_cont("%s %ps", comma ? "," : "", work->func);
4751 }
4752 }
4753
4754 static void show_pwq(struct pool_workqueue *pwq)
4755 {
4756 struct worker_pool *pool = pwq->pool;
4757 struct work_struct *work;
4758 struct worker *worker;
4759 bool has_in_flight = false, has_pending = false;
4760 int bkt;
4761
4762 pr_info(" pwq %d:", pool->id);
4763 pr_cont_pool_info(pool);
4764
4765 pr_cont(" active=%d/%d refcnt=%d%s\n",
4766 pwq->nr_active, pwq->max_active, pwq->refcnt,
4767 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4768
4769 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4770 if (worker->current_pwq == pwq) {
4771 has_in_flight = true;
4772 break;
4773 }
4774 }
4775 if (has_in_flight) {
4776 bool comma = false;
4777
4778 pr_info(" in-flight:");
4779 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4780 if (worker->current_pwq != pwq)
4781 continue;
4782
4783 pr_cont("%s %d%s:%ps", comma ? "," : "",
4784 task_pid_nr(worker->task),
4785 worker->rescue_wq ? "(RESCUER)" : "",
4786 worker->current_func);
4787 list_for_each_entry(work, &worker->scheduled, entry)
4788 pr_cont_work(false, work);
4789 comma = true;
4790 }
4791 pr_cont("\n");
4792 }
4793
4794 list_for_each_entry(work, &pool->worklist, entry) {
4795 if (get_work_pwq(work) == pwq) {
4796 has_pending = true;
4797 break;
4798 }
4799 }
4800 if (has_pending) {
4801 bool comma = false;
4802
4803 pr_info(" pending:");
4804 list_for_each_entry(work, &pool->worklist, entry) {
4805 if (get_work_pwq(work) != pwq)
4806 continue;
4807
4808 pr_cont_work(comma, work);
4809 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4810 }
4811 pr_cont("\n");
4812 }
4813
4814 if (!list_empty(&pwq->inactive_works)) {
4815 bool comma = false;
4816
4817 pr_info(" inactive:");
4818 list_for_each_entry(work, &pwq->inactive_works, entry) {
4819 pr_cont_work(comma, work);
4820 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4821 }
4822 pr_cont("\n");
4823 }
4824 }
4825
4826 /**
4827 * show_one_workqueue - dump state of specified workqueue
4828 * @wq: workqueue whose state will be printed
4829 */
4830 void show_one_workqueue(struct workqueue_struct *wq)
4831 {
4832 struct pool_workqueue *pwq;
4833 bool idle = true;
4834 unsigned long flags;
4835
4836 for_each_pwq(pwq, wq) {
4837 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4838 idle = false;
4839 break;
4840 }
4841 }
4842 if (idle) /* Nothing to print for idle workqueue */
4843 return;
4844
4845 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4846
4847 for_each_pwq(pwq, wq) {
4848 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4849 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4850 /*
4851 * Defer printing to avoid deadlocks in console
4852 * drivers that queue work while holding locks
4853 * also taken in their write paths.
4854 */
4855 printk_deferred_enter();
4856 show_pwq(pwq);
4857 printk_deferred_exit();
4858 }
4859 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4860 /*
4861 * We could be printing a lot from atomic context, e.g.
4862 * sysrq-t -> show_all_workqueues(). Avoid triggering
4863 * hard lockup.
4864 */
4865 touch_nmi_watchdog();
4866 }
4867
4868 }
4869
4870 /**
4871 * show_one_worker_pool - dump state of specified worker pool
4872 * @pool: worker pool whose state will be printed
4873 */
4874 static void show_one_worker_pool(struct worker_pool *pool)
4875 {
4876 struct worker *worker;
4877 bool first = true;
4878 unsigned long flags;
4879 unsigned long hung = 0;
4880
4881 raw_spin_lock_irqsave(&pool->lock, flags);
4882 if (pool->nr_workers == pool->nr_idle)
4883 goto next_pool;
4884
4885 /* How long the first pending work is waiting for a worker. */
4886 if (!list_empty(&pool->worklist))
4887 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
4888
4889 /*
4890 * Defer printing to avoid deadlocks in console drivers that
4891 * queue work while holding locks also taken in their write
4892 * paths.
4893 */
4894 printk_deferred_enter();
4895 pr_info("pool %d:", pool->id);
4896 pr_cont_pool_info(pool);
4897 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
4898 if (pool->manager)
4899 pr_cont(" manager: %d",
4900 task_pid_nr(pool->manager->task));
4901 list_for_each_entry(worker, &pool->idle_list, entry) {
4902 pr_cont(" %s%d", first ? "idle: " : "",
4903 task_pid_nr(worker->task));
4904 first = false;
4905 }
4906 pr_cont("\n");
4907 printk_deferred_exit();
4908 next_pool:
4909 raw_spin_unlock_irqrestore(&pool->lock, flags);
4910 /*
4911 * We could be printing a lot from atomic context, e.g.
4912 * sysrq-t -> show_all_workqueues(). Avoid triggering
4913 * hard lockup.
4914 */
4915 touch_nmi_watchdog();
4916
4917 }
4918
4919 /**
4920 * show_all_workqueues - dump workqueue state
4921 *
4922 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4923 * all busy workqueues and pools.
4924 */
4925 void show_all_workqueues(void)
4926 {
4927 struct workqueue_struct *wq;
4928 struct worker_pool *pool;
4929 int pi;
4930
4931 rcu_read_lock();
4932
4933 pr_info("Showing busy workqueues and worker pools:\n");
4934
4935 list_for_each_entry_rcu(wq, &workqueues, list)
4936 show_one_workqueue(wq);
4937
4938 for_each_pool(pool, pi)
4939 show_one_worker_pool(pool);
4940
4941 rcu_read_unlock();
4942 }
4943
4944 /* used to show worker information through /proc/PID/{comm,stat,status} */
4945 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4946 {
4947 int off;
4948
4949 /* always show the actual comm */
4950 off = strscpy(buf, task->comm, size);
4951 if (off < 0)
4952 return;
4953
4954 /* stabilize PF_WQ_WORKER and worker pool association */
4955 mutex_lock(&wq_pool_attach_mutex);
4956
4957 if (task->flags & PF_WQ_WORKER) {
4958 struct worker *worker = kthread_data(task);
4959 struct worker_pool *pool = worker->pool;
4960
4961 if (pool) {
4962 raw_spin_lock_irq(&pool->lock);
4963 /*
4964 * ->desc tracks information (wq name or
4965 * set_worker_desc()) for the latest execution. If
4966 * current, prepend '+', otherwise '-'.
4967 */
4968 if (worker->desc[0] != '\0') {
4969 if (worker->current_work)
4970 scnprintf(buf + off, size - off, "+%s",
4971 worker->desc);
4972 else
4973 scnprintf(buf + off, size - off, "-%s",
4974 worker->desc);
4975 }
4976 raw_spin_unlock_irq(&pool->lock);
4977 }
4978 }
4979
4980 mutex_unlock(&wq_pool_attach_mutex);
4981 }
4982 EXPORT_SYMBOL_GPL(wq_worker_comm);
4983
4984 #ifdef CONFIG_SMP
4985
4986 /*
4987 * CPU hotplug.
4988 *
4989 * There are two challenges in supporting CPU hotplug. Firstly, there
4990 * are a lot of assumptions on strong associations among work, pwq and
4991 * pool which make migrating pending and scheduled works very
4992 * difficult to implement without impacting hot paths. Secondly,
4993 * worker pools serve a mix of short, long and very long running work items,
4994 * making blocked draining impractical.
4995 *
4996 * This is solved by allowing the pools to be disassociated from the CPU
4997 * running as an unbound one and allowing it to be reattached later if the
4998 * cpu comes back online.
4999 */
5000
5001 static void unbind_workers(int cpu)
5002 {
5003 struct worker_pool *pool;
5004 struct worker *worker;
5005
5006 for_each_cpu_worker_pool(pool, cpu) {
5007 mutex_lock(&wq_pool_attach_mutex);
5008 raw_spin_lock_irq(&pool->lock);
5009
5010 /*
5011 * We've blocked all attach/detach operations. Make all workers
5012 * unbound and set DISASSOCIATED. Before this, all workers
5013 * except for the ones which are still executing works from
5014 * before the last CPU down must be on the cpu. After
5015 * this, they may become diasporas.
5016 */
5017 for_each_pool_worker(worker, pool)
5018 worker->flags |= WORKER_UNBOUND;
5019
5020 pool->flags |= POOL_DISASSOCIATED;
5021
5022 raw_spin_unlock_irq(&pool->lock);
5023
5024 for_each_pool_worker(worker, pool) {
5025 kthread_set_per_cpu(worker->task, -1);
5026 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
5027 }
5028
5029 mutex_unlock(&wq_pool_attach_mutex);
5030
5031 /*
5032 * Call schedule() so that we cross rq->lock and thus can
5033 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
5034 * This is necessary as scheduler callbacks may be invoked
5035 * from other cpus.
5036 */
5037 schedule();
5038
5039 /*
5040 * Sched callbacks are disabled now. Zap nr_running.
5041 * After this, nr_running stays zero and need_more_worker()
5042 * and keep_working() are always true as long as the
5043 * worklist is not empty. This pool now behaves as an
5044 * unbound (in terms of concurrency management) pool which
5045 * is served by workers tied to the pool.
5046 */
5047 atomic_set(&pool->nr_running, 0);
5048
5049 /*
5050 * With concurrency management just turned off, a busy
5051 * worker blocking could lead to lengthy stalls. Kick off
5052 * unbound chain execution of currently pending work items.
5053 */
5054 raw_spin_lock_irq(&pool->lock);
5055 wake_up_worker(pool);
5056 raw_spin_unlock_irq(&pool->lock);
5057 }
5058 }
5059
5060 /**
5061 * rebind_workers - rebind all workers of a pool to the associated CPU
5062 * @pool: pool of interest
5063 *
5064 * @pool->cpu is coming online. Rebind all workers to the CPU.
5065 */
5066 static void rebind_workers(struct worker_pool *pool)
5067 {
5068 struct worker *worker;
5069
5070 lockdep_assert_held(&wq_pool_attach_mutex);
5071
5072 /*
5073 * Restore CPU affinity of all workers. As all idle workers should
5074 * be on the run-queue of the associated CPU before any local
5075 * wake-ups for concurrency management happen, restore CPU affinity
5076 * of all workers first and then clear UNBOUND. As we're called
5077 * from CPU_ONLINE, the following shouldn't fail.
5078 */
5079 for_each_pool_worker(worker, pool) {
5080 kthread_set_per_cpu(worker->task, pool->cpu);
5081 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5082 pool->attrs->cpumask) < 0);
5083 }
5084
5085 raw_spin_lock_irq(&pool->lock);
5086
5087 pool->flags &= ~POOL_DISASSOCIATED;
5088
5089 for_each_pool_worker(worker, pool) {
5090 unsigned int worker_flags = worker->flags;
5091
5092 /*
5093 * A bound idle worker should actually be on the runqueue
5094 * of the associated CPU for local wake-ups targeting it to
5095 * work. Kick all idle workers so that they migrate to the
5096 * associated CPU. Doing this in the same loop as
5097 * replacing UNBOUND with REBOUND is safe as no worker will
5098 * be bound before @pool->lock is released.
5099 */
5100 if (worker_flags & WORKER_IDLE)
5101 wake_up_process(worker->task);
5102
5103 /*
5104 * We want to clear UNBOUND but can't directly call
5105 * worker_clr_flags() or adjust nr_running. Atomically
5106 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5107 * @worker will clear REBOUND using worker_clr_flags() when
5108 * it initiates the next execution cycle thus restoring
5109 * concurrency management. Note that when or whether
5110 * @worker clears REBOUND doesn't affect correctness.
5111 *
5112 * WRITE_ONCE() is necessary because @worker->flags may be
5113 * tested without holding any lock in
5114 * wq_worker_running(). Without it, NOT_RUNNING test may
5115 * fail incorrectly leading to premature concurrency
5116 * management operations.
5117 */
5118 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5119 worker_flags |= WORKER_REBOUND;
5120 worker_flags &= ~WORKER_UNBOUND;
5121 WRITE_ONCE(worker->flags, worker_flags);
5122 }
5123
5124 raw_spin_unlock_irq(&pool->lock);
5125 }
5126
5127 /**
5128 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5129 * @pool: unbound pool of interest
5130 * @cpu: the CPU which is coming up
5131 *
5132 * An unbound pool may end up with a cpumask which doesn't have any online
5133 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5134 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5135 * online CPU before, cpus_allowed of all its workers should be restored.
5136 */
5137 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5138 {
5139 static cpumask_t cpumask;
5140 struct worker *worker;
5141
5142 lockdep_assert_held(&wq_pool_attach_mutex);
5143
5144 /* is @cpu allowed for @pool? */
5145 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5146 return;
5147
5148 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5149
5150 /* as we're called from CPU_ONLINE, the following shouldn't fail */
5151 for_each_pool_worker(worker, pool)
5152 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5153 }
5154
5155 int workqueue_prepare_cpu(unsigned int cpu)
5156 {
5157 struct worker_pool *pool;
5158
5159 for_each_cpu_worker_pool(pool, cpu) {
5160 if (pool->nr_workers)
5161 continue;
5162 if (!create_worker(pool))
5163 return -ENOMEM;
5164 }
5165 return 0;
5166 }
5167
5168 int workqueue_online_cpu(unsigned int cpu)
5169 {
5170 struct worker_pool *pool;
5171 struct workqueue_struct *wq;
5172 int pi;
5173
5174 mutex_lock(&wq_pool_mutex);
5175
5176 for_each_pool(pool, pi) {
5177 mutex_lock(&wq_pool_attach_mutex);
5178
5179 if (pool->cpu == cpu)
5180 rebind_workers(pool);
5181 else if (pool->cpu < 0)
5182 restore_unbound_workers_cpumask(pool, cpu);
5183
5184 mutex_unlock(&wq_pool_attach_mutex);
5185 }
5186
5187 /* update NUMA affinity of unbound workqueues */
5188 list_for_each_entry(wq, &workqueues, list)
5189 wq_update_unbound_numa(wq, cpu, true);
5190
5191 mutex_unlock(&wq_pool_mutex);
5192 return 0;
5193 }
5194
5195 int workqueue_offline_cpu(unsigned int cpu)
5196 {
5197 struct workqueue_struct *wq;
5198
5199 /* unbinding per-cpu workers should happen on the local CPU */
5200 if (WARN_ON(cpu != smp_processor_id()))
5201 return -1;
5202
5203 unbind_workers(cpu);
5204
5205 /* update NUMA affinity of unbound workqueues */
5206 mutex_lock(&wq_pool_mutex);
5207 list_for_each_entry(wq, &workqueues, list)
5208 wq_update_unbound_numa(wq, cpu, false);
5209 mutex_unlock(&wq_pool_mutex);
5210
5211 return 0;
5212 }
5213
5214 struct work_for_cpu {
5215 struct work_struct work;
5216 long (*fn)(void *);
5217 void *arg;
5218 long ret;
5219 };
5220
5221 static void work_for_cpu_fn(struct work_struct *work)
5222 {
5223 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5224
5225 wfc->ret = wfc->fn(wfc->arg);
5226 }
5227
5228 /**
5229 * work_on_cpu - run a function in thread context on a particular cpu
5230 * @cpu: the cpu to run on
5231 * @fn: the function to run
5232 * @arg: the function arg
5233 *
5234 * It is up to the caller to ensure that the cpu doesn't go offline.
5235 * The caller must not hold any locks which would prevent @fn from completing.
5236 *
5237 * Return: The value @fn returns.
5238 */
5239 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5240 {
5241 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5242
5243 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5244 schedule_work_on(cpu, &wfc.work);
5245 flush_work(&wfc.work);
5246 destroy_work_on_stack(&wfc.work);
5247 return wfc.ret;
5248 }
5249 EXPORT_SYMBOL_GPL(work_on_cpu);
5250
5251 /**
5252 * work_on_cpu_safe - run a function in thread context on a particular cpu
5253 * @cpu: the cpu to run on
5254 * @fn: the function to run
5255 * @arg: the function argument
5256 *
5257 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5258 * any locks which would prevent @fn from completing.
5259 *
5260 * Return: The value @fn returns.
5261 */
5262 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5263 {
5264 long ret = -ENODEV;
5265
5266 cpus_read_lock();
5267 if (cpu_online(cpu))
5268 ret = work_on_cpu(cpu, fn, arg);
5269 cpus_read_unlock();
5270 return ret;
5271 }
5272 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
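/*
 * Usage sketch (hypothetical): both helpers above take a plain function and
 * argument, run it in a kworker pinned to @cpu and return its long result.
 * read_node_distance() and the variables are made up for illustration; with
 * work_on_cpu() the caller must additionally keep @cpu online itself (e.g.
 * under cpus_read_lock()), which work_on_cpu_safe() does internally and
 * returns -ENODEV if @cpu is offline.
 *
 *	static long read_node_distance(void *arg)
 *	{
 *		return node_distance(numa_node_id(), (long)arg);
 *	}
 *
 *	long dist;
 *
 *	dist = work_on_cpu_safe(cpu, read_node_distance,
 *				(void *)(long)target_node);
 */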
5273 #endif /* CONFIG_SMP */
5274
5275 #ifdef CONFIG_FREEZER
5276
5277 /**
5278 * freeze_workqueues_begin - begin freezing workqueues
5279 *
5280 * Start freezing workqueues. After this function returns, all freezable
5281 * workqueues will queue new works to their inactive_works list instead of
5282 * pool->worklist.
5283 *
5284 * CONTEXT:
5285 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5286 */
5287 void freeze_workqueues_begin(void)
5288 {
5289 struct workqueue_struct *wq;
5290 struct pool_workqueue *pwq;
5291
5292 mutex_lock(&wq_pool_mutex);
5293
5294 WARN_ON_ONCE(workqueue_freezing);
5295 workqueue_freezing = true;
5296
5297 list_for_each_entry(wq, &workqueues, list) {
5298 mutex_lock(&wq->mutex);
5299 for_each_pwq(pwq, wq)
5300 pwq_adjust_max_active(pwq);
5301 mutex_unlock(&wq->mutex);
5302 }
5303
5304 mutex_unlock(&wq_pool_mutex);
5305 }
5306
5307 /**
5308 * freeze_workqueues_busy - are freezable workqueues still busy?
5309 *
5310 * Check whether freezing is complete. This function must be called
5311 * between freeze_workqueues_begin() and thaw_workqueues().
5312 *
5313 * CONTEXT:
5314 * Grabs and releases wq_pool_mutex.
5315 *
5316 * Return:
5317 * %true if some freezable workqueues are still busy. %false if freezing
5318 * is complete.
5319 */
5320 bool freeze_workqueues_busy(void)
5321 {
5322 bool busy = false;
5323 struct workqueue_struct *wq;
5324 struct pool_workqueue *pwq;
5325
5326 mutex_lock(&wq_pool_mutex);
5327
5328 WARN_ON_ONCE(!workqueue_freezing);
5329
5330 list_for_each_entry(wq, &workqueues, list) {
5331 if (!(wq->flags & WQ_FREEZABLE))
5332 continue;
5333 /*
5334 * nr_active is monotonically decreasing. It's safe
5335 * to peek without lock.
5336 */
5337 rcu_read_lock();
5338 for_each_pwq(pwq, wq) {
5339 WARN_ON_ONCE(pwq->nr_active < 0);
5340 if (pwq->nr_active) {
5341 busy = true;
5342 rcu_read_unlock();
5343 goto out_unlock;
5344 }
5345 }
5346 rcu_read_unlock();
5347 }
5348 out_unlock:
5349 mutex_unlock(&wq_pool_mutex);
5350 return busy;
5351 }
5352
5353 /**
5354 * thaw_workqueues - thaw workqueues
5355 *
5356 * Thaw workqueues. Normal queueing is restored and all collected
5357 * frozen works are transferred to their respective pool worklists.
5358 *
5359 * CONTEXT:
5360 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5361 */
5362 void thaw_workqueues(void)
5363 {
5364 struct workqueue_struct *wq;
5365 struct pool_workqueue *pwq;
5366
5367 mutex_lock(&wq_pool_mutex);
5368
5369 if (!workqueue_freezing)
5370 goto out_unlock;
5371
5372 workqueue_freezing = false;
5373
5374 /* restore max_active and repopulate worklist */
5375 list_for_each_entry(wq, &workqueues, list) {
5376 mutex_lock(&wq->mutex);
5377 for_each_pwq(pwq, wq)
5378 pwq_adjust_max_active(pwq);
5379 mutex_unlock(&wq->mutex);
5380 }
5381
5382 out_unlock:
5383 mutex_unlock(&wq_pool_mutex);
5384 }
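/*
 * Usage sketch (hypothetical, simplified): the suspend path uses the three
 * functions above roughly in the order shown below - start freezing, poll
 * until no freezable workqueue is busy, and thaw again on resume or error.
 * The real PM core also enforces a timeout and freezes user space and
 * kthreads; this only illustrates the required call ordering.
 *
 *	freeze_workqueues_begin();
 *
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *
 *	// ... system sleeps and later resumes ...
 *
 *	thaw_workqueues();
 */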
5385 #endif /* CONFIG_FREEZER */
5386
5387 static int workqueue_apply_unbound_cpumask(void)
5388 {
5389 LIST_HEAD(ctxs);
5390 int ret = 0;
5391 struct workqueue_struct *wq;
5392 struct apply_wqattrs_ctx *ctx, *n;
5393
5394 lockdep_assert_held(&wq_pool_mutex);
5395
5396 list_for_each_entry(wq, &workqueues, list) {
5397 if (!(wq->flags & WQ_UNBOUND))
5398 continue;
5399
5400 /* creating multiple pwqs breaks ordering guarantee */
5401 if (!list_empty(&wq->pwqs)) {
5402 if (wq->flags & __WQ_ORDERED_EXPLICIT)
5403 continue;
5404 wq->flags &= ~__WQ_ORDERED;
5405 }
5406
5407 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5408 if (!ctx) {
5409 ret = -ENOMEM;
5410 break;
5411 }
5412
5413 list_add_tail(&ctx->list, &ctxs);
5414 }
5415
5416 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5417 if (!ret)
5418 apply_wqattrs_commit(ctx);
5419 apply_wqattrs_cleanup(ctx);
5420 }
5421
5422 return ret;
5423 }
5424
5425 /**
5426 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5427 * @cpumask: the cpumask to set
5428 *
5429 * The low-level workqueues cpumask is a global cpumask that limits
5430 * the affinity of all unbound workqueues. This function checks @cpumask,
5431 * applies it to all unbound workqueues and updates all of their pwqs.
5432 *
5433 * Return: 0 - Success
5434 * -EINVAL - Invalid @cpumask
5435 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5436 */
5437 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5438 {
5439 int ret = -EINVAL;
5440 cpumask_var_t saved_cpumask;
5441
5442 /*
5443 * Not excluding isolated cpus on purpose.
5444 * If the user wishes to include them, we allow that.
5445 */
5446 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5447 if (!cpumask_empty(cpumask)) {
5448 apply_wqattrs_lock();
5449 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5450 ret = 0;
5451 goto out_unlock;
5452 }
5453
5454 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
5455 ret = -ENOMEM;
5456 goto out_unlock;
5457 }
5458
5459 /* save the old wq_unbound_cpumask. */
5460 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5461
5462 /* update wq_unbound_cpumask at first and apply it to wqs. */
5463 cpumask_copy(wq_unbound_cpumask, cpumask);
5464 ret = workqueue_apply_unbound_cpumask();
5465
5466 /* restore the wq_unbound_cpumask when failed. */
5467 if (ret < 0)
5468 cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5469
5470 free_cpumask_var(saved_cpumask);
5471 out_unlock:
5472 apply_wqattrs_unlock();
5473 }
5474
5475 return ret;
5476 }
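/*
 * Usage sketch (hypothetical): restrict all unbound workqueues to the
 * housekeeping CPUs. The cpumask handling and calls are real APIs; whether
 * kernel code should do this directly instead of relying on the sysfs knob
 * below is situational.
 *
 *	cpumask_var_t mask;
 *	int ret;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 *	ret = workqueue_set_unbound_cpumask(mask);
 *	free_cpumask_var(mask);
 */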
5477
5478 #ifdef CONFIG_SYSFS
5479 /*
5480 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5481 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5482 * following attributes.
5483 *
5484 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5485 * max_active RW int : maximum number of in-flight work items
5486 *
5487 * Unbound workqueues have the following extra attributes.
5488 *
5489 * pool_ids RO int : the associated pool IDs for each node
5490 * nice RW int : nice value of the workers
5491 * cpumask RW mask : bitmask of allowed CPUs for the workers
5492 * numa RW bool : whether to enable NUMA affinity
5493 */
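/*
 * Usage sketch (hypothetical): a driver that wants its workqueue to be
 * tunable from user space passes WQ_SYSFS at allocation time; "my_wq" is a
 * made-up name. The attributes listed above then show up under
 * /sys/bus/workqueue/devices/my_wq/ and can be adjusted at runtime.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */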
5494 struct wq_device {
5495 struct workqueue_struct *wq;
5496 struct device dev;
5497 };
5498
5499 static struct workqueue_struct *dev_to_wq(struct device *dev)
5500 {
5501 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5502
5503 return wq_dev->wq;
5504 }
5505
5506 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5507 char *buf)
5508 {
5509 struct workqueue_struct *wq = dev_to_wq(dev);
5510
5511 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5512 }
5513 static DEVICE_ATTR_RO(per_cpu);
5514
5515 static ssize_t max_active_show(struct device *dev,
5516 struct device_attribute *attr, char *buf)
5517 {
5518 struct workqueue_struct *wq = dev_to_wq(dev);
5519
5520 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5521 }
5522
5523 static ssize_t max_active_store(struct device *dev,
5524 struct device_attribute *attr, const char *buf,
5525 size_t count)
5526 {
5527 struct workqueue_struct *wq = dev_to_wq(dev);
5528 int val;
5529
5530 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5531 return -EINVAL;
5532
5533 workqueue_set_max_active(wq, val);
5534 return count;
5535 }
5536 static DEVICE_ATTR_RW(max_active);
5537
5538 static struct attribute *wq_sysfs_attrs[] = {
5539 &dev_attr_per_cpu.attr,
5540 &dev_attr_max_active.attr,
5541 NULL,
5542 };
5543 ATTRIBUTE_GROUPS(wq_sysfs);
5544
5545 static ssize_t wq_pool_ids_show(struct device *dev,
5546 struct device_attribute *attr, char *buf)
5547 {
5548 struct workqueue_struct *wq = dev_to_wq(dev);
5549 const char *delim = "";
5550 int node, written = 0;
5551
5552 cpus_read_lock();
5553 rcu_read_lock();
5554 for_each_node(node) {
5555 written += scnprintf(buf + written, PAGE_SIZE - written,
5556 "%s%d:%d", delim, node,
5557 unbound_pwq_by_node(wq, node)->pool->id);
5558 delim = " ";
5559 }
5560 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5561 rcu_read_unlock();
5562 cpus_read_unlock();
5563
5564 return written;
5565 }
5566
5567 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5568 char *buf)
5569 {
5570 struct workqueue_struct *wq = dev_to_wq(dev);
5571 int written;
5572
5573 mutex_lock(&wq->mutex);
5574 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5575 mutex_unlock(&wq->mutex);
5576
5577 return written;
5578 }
5579
5580 /* prepare workqueue_attrs for sysfs store operations */
5581 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5582 {
5583 struct workqueue_attrs *attrs;
5584
5585 lockdep_assert_held(&wq_pool_mutex);
5586
5587 attrs = alloc_workqueue_attrs();
5588 if (!attrs)
5589 return NULL;
5590
5591 copy_workqueue_attrs(attrs, wq->unbound_attrs);
5592 return attrs;
5593 }
5594
5595 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5596 const char *buf, size_t count)
5597 {
5598 struct workqueue_struct *wq = dev_to_wq(dev);
5599 struct workqueue_attrs *attrs;
5600 int ret = -ENOMEM;
5601
5602 apply_wqattrs_lock();
5603
5604 attrs = wq_sysfs_prep_attrs(wq);
5605 if (!attrs)
5606 goto out_unlock;
5607
5608 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5609 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5610 ret = apply_workqueue_attrs_locked(wq, attrs);
5611 else
5612 ret = -EINVAL;
5613
5614 out_unlock:
5615 apply_wqattrs_unlock();
5616 free_workqueue_attrs(attrs);
5617 return ret ?: count;
5618 }
5619
5620 static ssize_t wq_cpumask_show(struct device *dev,
5621 struct device_attribute *attr, char *buf)
5622 {
5623 struct workqueue_struct *wq = dev_to_wq(dev);
5624 int written;
5625
5626 mutex_lock(&wq->mutex);
5627 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5628 cpumask_pr_args(wq->unbound_attrs->cpumask));
5629 mutex_unlock(&wq->mutex);
5630 return written;
5631 }
5632
5633 static ssize_t wq_cpumask_store(struct device *dev,
5634 struct device_attribute *attr,
5635 const char *buf, size_t count)
5636 {
5637 struct workqueue_struct *wq = dev_to_wq(dev);
5638 struct workqueue_attrs *attrs;
5639 int ret = -ENOMEM;
5640
5641 apply_wqattrs_lock();
5642
5643 attrs = wq_sysfs_prep_attrs(wq);
5644 if (!attrs)
5645 goto out_unlock;
5646
5647 ret = cpumask_parse(buf, attrs->cpumask);
5648 if (!ret)
5649 ret = apply_workqueue_attrs_locked(wq, attrs);
5650
5651 out_unlock:
5652 apply_wqattrs_unlock();
5653 free_workqueue_attrs(attrs);
5654 return ret ?: count;
5655 }
5656
5657 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5658 char *buf)
5659 {
5660 struct workqueue_struct *wq = dev_to_wq(dev);
5661 int written;
5662
5663 mutex_lock(&wq->mutex);
5664 written = scnprintf(buf, PAGE_SIZE, "%d\n",
5665 !wq->unbound_attrs->no_numa);
5666 mutex_unlock(&wq->mutex);
5667
5668 return written;
5669 }
5670
5671 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5672 const char *buf, size_t count)
5673 {
5674 struct workqueue_struct *wq = dev_to_wq(dev);
5675 struct workqueue_attrs *attrs;
5676 int v, ret = -ENOMEM;
5677
5678 apply_wqattrs_lock();
5679
5680 attrs = wq_sysfs_prep_attrs(wq);
5681 if (!attrs)
5682 goto out_unlock;
5683
5684 ret = -EINVAL;
5685 if (sscanf(buf, "%d", &v) == 1) {
5686 attrs->no_numa = !v;
5687 ret = apply_workqueue_attrs_locked(wq, attrs);
5688 }
5689
5690 out_unlock:
5691 apply_wqattrs_unlock();
5692 free_workqueue_attrs(attrs);
5693 return ret ?: count;
5694 }
5695
5696 static struct device_attribute wq_sysfs_unbound_attrs[] = {
5697 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5698 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5699 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5700 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5701 __ATTR_NULL,
5702 };
5703
5704 static struct bus_type wq_subsys = {
5705 .name = "workqueue",
5706 .dev_groups = wq_sysfs_groups,
5707 };
5708
5709 static ssize_t wq_unbound_cpumask_show(struct device *dev,
5710 struct device_attribute *attr, char *buf)
5711 {
5712 int written;
5713
5714 mutex_lock(&wq_pool_mutex);
5715 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5716 cpumask_pr_args(wq_unbound_cpumask));
5717 mutex_unlock(&wq_pool_mutex);
5718
5719 return written;
5720 }
5721
5722 static ssize_t wq_unbound_cpumask_store(struct device *dev,
5723 struct device_attribute *attr, const char *buf, size_t count)
5724 {
5725 cpumask_var_t cpumask;
5726 int ret;
5727
5728 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5729 return -ENOMEM;
5730
5731 ret = cpumask_parse(buf, cpumask);
5732 if (!ret)
5733 ret = workqueue_set_unbound_cpumask(cpumask);
5734
5735 free_cpumask_var(cpumask);
5736 return ret ? ret : count;
5737 }
5738
5739 static struct device_attribute wq_sysfs_cpumask_attr =
5740 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5741 wq_unbound_cpumask_store);
5742
5743 static int __init wq_sysfs_init(void)
5744 {
5745 int err;
5746
5747 err = subsys_virtual_register(&wq_subsys, NULL);
5748 if (err)
5749 return err;
5750
5751 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5752 }
5753 core_initcall(wq_sysfs_init);
5754
5755 static void wq_device_release(struct device *dev)
5756 {
5757 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5758
5759 kfree(wq_dev);
5760 }
5761
5762 /**
5763 * workqueue_sysfs_register - make a workqueue visible in sysfs
5764 * @wq: the workqueue to register
5765 *
5766 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5767 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
5768 * which is the preferred method.
5769 *
5770 * A workqueue user should use this function directly iff it wants to apply
5771 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5772 * apply_workqueue_attrs() may race against userland updating the
5773 * attributes.
5774 *
5775 * Return: 0 on success, -errno on failure.
5776 */
5777 int workqueue_sysfs_register(struct workqueue_struct *wq)
5778 {
5779 struct wq_device *wq_dev;
5780 int ret;
5781
5782 /*
5783 * Adjusting max_active or creating new pwqs by applying
5784 * attributes breaks ordering guarantee. Disallow exposing ordered
5785 * workqueues.
5786 */
5787 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5788 return -EINVAL;
5789
5790 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5791 if (!wq_dev)
5792 return -ENOMEM;
5793
5794 wq_dev->wq = wq;
5795 wq_dev->dev.bus = &wq_subsys;
5796 wq_dev->dev.release = wq_device_release;
5797 dev_set_name(&wq_dev->dev, "%s", wq->name);
5798
5799 /*
5800 * unbound_attrs are created separately. Suppress uevent until
5801 * everything is ready.
5802 */
5803 dev_set_uevent_suppress(&wq_dev->dev, true);
5804
5805 ret = device_register(&wq_dev->dev);
5806 if (ret) {
5807 put_device(&wq_dev->dev);
5808 wq->wq_dev = NULL;
5809 return ret;
5810 }
5811
5812 if (wq->flags & WQ_UNBOUND) {
5813 struct device_attribute *attr;
5814
5815 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5816 ret = device_create_file(&wq_dev->dev, attr);
5817 if (ret) {
5818 device_unregister(&wq_dev->dev);
5819 wq->wq_dev = NULL;
5820 return ret;
5821 }
5822 }
5823 }
5824
5825 dev_set_uevent_suppress(&wq_dev->dev, false);
5826 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5827 return 0;
5828 }
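/*
 * Usage sketch (hypothetical) of the "apply attrs first, register later"
 * pattern described above; "my_wq" and the nice value are made up, the
 * calls themselves are the real APIs and error unwinding is elided.
 *
 *	struct workqueue_struct *wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *	int ret = -ENOMEM;
 *
 *	if (!wq || !attrs)
 *		goto fail;
 *	attrs->nice = -10;
 *	ret = apply_workqueue_attrs(wq, attrs);
 *	free_workqueue_attrs(attrs);
 *	if (ret || workqueue_sysfs_register(wq))
 *		goto fail;
 */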
5829
5830 /**
5831 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5832 * @wq: the workqueue to unregister
5833 *
5834 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5835 */
5836 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5837 {
5838 struct wq_device *wq_dev = wq->wq_dev;
5839
5840 if (!wq->wq_dev)
5841 return;
5842
5843 wq->wq_dev = NULL;
5844 device_unregister(&wq_dev->dev);
5845 }
5846 #else /* CONFIG_SYSFS */
5847 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5848 #endif /* CONFIG_SYSFS */
5849
5850 /*
5851 * Workqueue watchdog.
5852 *
5853 * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
5854 * illegal flush dependency, or a concurrency managed work item which
5855 * stays RUNNING indefinitely. Workqueue stalls can be very difficult to
5856 * debug as the usual warning mechanisms don't trigger and internal
5857 * workqueue state is largely opaque.
5858 *
5859 * The workqueue watchdog monitors all worker pools periodically and dumps
5860 * state if some pools fail to make forward progress for a while, where
5861 * forward progress is defined as the first item on ->worklist changing.
5862 *
5863 * This mechanism is controlled through the kernel parameter
5864 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5865 * corresponding sysfs parameter file.
5866 */
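/*
 * Usage sketch (hypothetical): the threshold normally comes from the
 * "workqueue.watchdog_thresh" kernel parameter handled below. Kernel code
 * that legitimately keeps a CPU busy without scheduling for a long time can
 * avoid false positives by touching the watchdog; my_long_poll() and @stop
 * are made-up names.
 *
 *	static void my_long_poll(atomic_t *stop)
 *	{
 *		while (!atomic_read(stop)) {
 *			cpu_relax();
 *			touch_nmi_watchdog();
 *			wq_watchdog_touch(raw_smp_processor_id());
 *		}
 *	}
 */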
5867 #ifdef CONFIG_WQ_WATCHDOG
5868
5869 static unsigned long wq_watchdog_thresh = 30;
5870 static struct timer_list wq_watchdog_timer;
5871
5872 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5873 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5874
5875 static void wq_watchdog_reset_touched(void)
5876 {
5877 int cpu;
5878
5879 wq_watchdog_touched = jiffies;
5880 for_each_possible_cpu(cpu)
5881 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5882 }
5883
5884 static void wq_watchdog_timer_fn(struct timer_list *unused)
5885 {
5886 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5887 bool lockup_detected = false;
5888 unsigned long now = jiffies;
5889 struct worker_pool *pool;
5890 int pi;
5891
5892 if (!thresh)
5893 return;
5894
5895 rcu_read_lock();
5896
5897 for_each_pool(pool, pi) {
5898 unsigned long pool_ts, touched, ts;
5899
5900 if (list_empty(&pool->worklist))
5901 continue;
5902
5903 /*
5904 * If a virtual machine is stopped by the host it can look to
5905 * the watchdog like a stall.
5906 */
5907 kvm_check_and_clear_guest_paused();
5908
5909 /* get the latest of pool and touched timestamps */
5910 if (pool->cpu >= 0)
5911 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
5912 else
5913 touched = READ_ONCE(wq_watchdog_touched);
5914 pool_ts = READ_ONCE(pool->watchdog_ts);
5915
5916 if (time_after(pool_ts, touched))
5917 ts = pool_ts;
5918 else
5919 ts = touched;
5920
5921 /* did we stall? */
5922 if (time_after(now, ts + thresh)) {
5923 lockup_detected = true;
5924 pr_emerg("BUG: workqueue lockup - pool");
5925 pr_cont_pool_info(pool);
5926 pr_cont(" stuck for %us!\n",
5927 jiffies_to_msecs(now - pool_ts) / 1000);
5928 trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
5929 }
5930 }
5931
5932 rcu_read_unlock();
5933
5934 if (lockup_detected)
5935 show_all_workqueues();
5936
5937 wq_watchdog_reset_touched();
5938 mod_timer(&wq_watchdog_timer, jiffies + thresh);
5939 }
5940
5941 notrace void wq_watchdog_touch(int cpu)
5942 {
5943 if (cpu >= 0)
5944 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5945
5946 wq_watchdog_touched = jiffies;
5947 }
5948
5949 static void wq_watchdog_set_thresh(unsigned long thresh)
5950 {
5951 wq_watchdog_thresh = 0;
5952 del_timer_sync(&wq_watchdog_timer);
5953
5954 if (thresh) {
5955 wq_watchdog_thresh = thresh;
5956 wq_watchdog_reset_touched();
5957 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5958 }
5959 }
5960
5961 static int wq_watchdog_param_set_thresh(const char *val,
5962 const struct kernel_param *kp)
5963 {
5964 unsigned long thresh;
5965 int ret;
5966
5967 ret = kstrtoul(val, 0, &thresh);
5968 if (ret)
5969 return ret;
5970
5971 if (system_wq)
5972 wq_watchdog_set_thresh(thresh);
5973 else
5974 wq_watchdog_thresh = thresh;
5975
5976 return 0;
5977 }
5978
5979 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5980 .set = wq_watchdog_param_set_thresh,
5981 .get = param_get_ulong,
5982 };
5983
5984 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5985 0644);
5986
5987 static void wq_watchdog_init(void)
5988 {
5989 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5990 wq_watchdog_set_thresh(wq_watchdog_thresh);
5991 }
5992
5993 #else /* CONFIG_WQ_WATCHDOG */
5994
5995 static inline void wq_watchdog_init(void) { }
5996
5997 #endif /* CONFIG_WQ_WATCHDOG */
5998
5999 static void __init wq_numa_init(void)
6000 {
6001 cpumask_var_t *tbl;
6002 int node, cpu;
6003
6004 if (num_possible_nodes() <= 1)
6005 return;
6006
6007 if (wq_disable_numa) {
6008 pr_info("workqueue: NUMA affinity support disabled\n");
6009 return;
6010 }
6011
6012 for_each_possible_cpu(cpu) {
6013 if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
6014 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
6015 return;
6016 }
6017 }
6018
6019 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
6020 BUG_ON(!wq_update_unbound_numa_attrs_buf);
6021
6022 /*
6023 * We want masks of possible CPUs of each node which isn't readily
6024 * available. Build one from cpu_to_node() which should have been
6025 * fully initialized by now.
6026 */
6027 tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
6028 BUG_ON(!tbl);
6029
6030 for_each_node(node)
6031 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
6032 node_online(node) ? node : NUMA_NO_NODE));
6033
6034 for_each_possible_cpu(cpu) {
6035 node = cpu_to_node(cpu);
6036 cpumask_set_cpu(cpu, tbl[node]);
6037 }
6038
6039 wq_numa_possible_cpumask = tbl;
6040 wq_numa_enabled = true;
6041 }
6042
6043 /**
6044 * workqueue_init_early - early init for workqueue subsystem
6045 *
6046 * This is the first half of two-staged workqueue subsystem initialization
6047 * and invoked as soon as the bare basics - memory allocation, cpumasks and
6048 * idr are up. It sets up all the data structures and system workqueues
6049 * and allows early boot code to create workqueues and queue/cancel work
6050 * items. Actual work item execution starts only after kthreads can be
6051 * created and scheduled right before early initcalls.
6052 */
6053 void __init workqueue_init_early(void)
6054 {
6055 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6056 int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
6057 int i, cpu;
6058
6059 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6060
6061 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
6062 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
6063
6064 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6065
6066 /* initialize CPU pools */
6067 for_each_possible_cpu(cpu) {
6068 struct worker_pool *pool;
6069
6070 i = 0;
6071 for_each_cpu_worker_pool(pool, cpu) {
6072 BUG_ON(init_worker_pool(pool));
6073 pool->cpu = cpu;
6074 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
6075 pool->attrs->nice = std_nice[i++];
6076 pool->node = cpu_to_node(cpu);
6077
6078 /* alloc pool ID */
6079 mutex_lock(&wq_pool_mutex);
6080 BUG_ON(worker_pool_assign_id(pool));
6081 mutex_unlock(&wq_pool_mutex);
6082 }
6083 }
6084
6085 /* create default unbound and ordered wq attrs */
6086 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6087 struct workqueue_attrs *attrs;
6088
6089 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6090 attrs->nice = std_nice[i];
6091 unbound_std_wq_attrs[i] = attrs;
6092
6093 /*
6094 * An ordered wq should have only one pwq as ordering is
6095 * guaranteed by max_active which is enforced by pwqs.
6096 * Turn off NUMA so that dfl_pwq is used for all nodes.
6097 */
6098 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6099 attrs->nice = std_nice[i];
6100 attrs->no_numa = true;
6101 ordered_wq_attrs[i] = attrs;
6102 }
6103
6104 system_wq = alloc_workqueue("events", 0, 0);
6105 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6106 system_long_wq = alloc_workqueue("events_long", 0, 0);
6107 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6108 WQ_UNBOUND_MAX_ACTIVE);
6109 system_freezable_wq = alloc_workqueue("events_freezable",
6110 WQ_FREEZABLE, 0);
6111 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6112 WQ_POWER_EFFICIENT, 0);
6113 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6114 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6115 0);
6116 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6117 !system_unbound_wq || !system_freezable_wq ||
6118 !system_power_efficient_wq ||
6119 !system_freezable_power_efficient_wq);
6120 }
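/*
 * Usage sketch (hypothetical): once workqueue_init_early() has run, early
 * boot code may create workqueues and queue work, even though nothing
 * executes it until workqueue_init() starts the kworkers. my_early_init()
 * and my_work_fn() are made-up names illustrating that ordering.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("runs only after kworkers exist\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	void __init my_early_init(void)
 *	{
 *		schedule_work(&my_work);	// queued now, runs later
 *	}
 */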
6121
6122 /**
6123 * workqueue_init - bring workqueue subsystem fully online
6124 *
6125 * This is the latter half of two-staged workqueue subsystem initialization
6126 * and invoked as soon as kthreads can be created and scheduled.
6127 * Workqueues have been created and work items queued on them, but there
6128 * are no kworkers executing the work items yet. Populate the worker pools
6129 * with the initial workers and enable future kworker creations.
6130 */
6131 void __init workqueue_init(void)
6132 {
6133 struct workqueue_struct *wq;
6134 struct worker_pool *pool;
6135 int cpu, bkt;
6136
6137 /*
6138 * It'd be simpler to initialize NUMA in workqueue_init_early() but
6139 * CPU to node mapping may not be available that early on some
6140 * archs such as power and arm64. As the per-cpu pools created
6141 * earlier could be missing their node hint, and unbound pools their
6142 * NUMA affinity, fix them up here.
6143 *
6144 * Also, while iterating workqueues, create rescuers if requested.
6145 */
6146 wq_numa_init();
6147
6148 mutex_lock(&wq_pool_mutex);
6149
6150 for_each_possible_cpu(cpu) {
6151 for_each_cpu_worker_pool(pool, cpu) {
6152 pool->node = cpu_to_node(cpu);
6153 }
6154 }
6155
6156 list_for_each_entry(wq, &workqueues, list) {
6157 wq_update_unbound_numa(wq, smp_processor_id(), true);
6158 WARN(init_rescuer(wq),
6159 "workqueue: failed to create early rescuer for %s",
6160 wq->name);
6161 }
6162
6163 mutex_unlock(&wq_pool_mutex);
6164
6165 /* create the initial workers */
6166 for_each_online_cpu(cpu) {
6167 for_each_cpu_worker_pool(pool, cpu) {
6168 pool->flags &= ~POOL_DISASSOCIATED;
6169 BUG_ON(!create_worker(pool));
6170 }
6171 }
6172
6173 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6174 BUG_ON(!create_worker(pool));
6175
6176 wq_online = true;
6177 wq_watchdog_init();
6178 }
6179