// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};
static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return k->worker_private;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->worker_private
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	struct kthread *kthread = to_kthread(tsk);

	if (!kthread || !kthread->full_name) {
		__get_task_comm(buf, buf_size, tsk);
		return;
	}

	strscpy_pad(buf, kthread->full_name, buf_size);
}

bool set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (WARN_ON_ONCE(to_kthread(p)))
		return false;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	if (!kthread)
		return false;

	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	p->vfork_done = &kthread->exited;

	p->worker_private = kthread;
	return true;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
	 */
	kthread = to_kthread(k);
	if (!kthread)
		return;

#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread->blkcg_css);
#endif
	k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
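
/*
 * Example (illustrative sketch, not part of this file): the typical shape
 * of a thread function honouring the kthread_should_stop() contract. The
 * names my_thread_fn, my_dev and do_some_work() are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			do_some_work(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The return value is passed through to kthread_stop().
 */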

static bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

bool kthread_should_stop_or_park(void)
{
	struct kthread *kthread = __to_kthread(current);

	if (!kthread)
		return false;

	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
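
/*
 * Example (illustrative sketch, not part of this file): a freezable kthread
 * main loop built on kthread_freezable_should_stop(). my_freezable_fn and
 * the helpers it calls are hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				resync_after_resume();
 *			do_some_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */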

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);
	kthread->result = result;
	do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);

/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return @code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);

static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when the caller is killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}

/* called from kernel_clone() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when the caller is killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
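
/*
 * Example (illustrative sketch, not part of this file): creating a named
 * kthread with a NUMA-local stack and starting it explicitly. my_thread_fn,
 * dev, dev->node and dev->id are hypothetical.
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_node(my_thread_fn, dev, dev->node,
 *				      "my_worker/%d", dev->id);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 *
 * The thread does not run until wake_up_process() (or kthread_park()) is
 * called, which leaves a window for e.g. kthread_bind().
 */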

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(kthread_bind_mask);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
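
/*
 * Example (illustrative sketch, not part of this file): creating a
 * CPU-bound kthread. Note that @namefmt is restricted to a single "%u",
 * which this function fills in with the CPU number; my_percpu_fn is
 * hypothetical.
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_cpu(my_percpu_fn, NULL, cpu, "my_worker/%u");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	kthread_set_per_cpu(task, cpu);
 *	wake_up_process(task);
 */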

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
EXPORT_SYMBOL_GPL(kthread_set_per_cpu);

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
		return;
	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
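
/*
 * Example (illustrative sketch, not part of this file): the park protocol
 * from both sides, e.g. around CPU hotplug. my_percpu_fn is hypothetical.
 *
 * Thread side:
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			do_some_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * Controller side:
 *
 *	kthread_park(task);	the thread is now blocked in TASK_PARKED
 *	...
 *	kthread_unpark(task);	the thread resumes its loop
 */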

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
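
/*
 * Example (illustrative sketch, not part of this file): the full lifecycle
 * from the controller's side, using the kthread_run() macro (which is
 * kthread_create() followed by wake_up_process()) and kthread_stop().
 * my_thread_fn, dev and dev->id are hypothetical.
 *
 *	dev->task = kthread_run(my_thread_fn, dev, "mydev/%d", dev->id);
 *	if (IS_ERR(dev->task))
 *		return PTR_ERR(dev->task);
 *	...
 *	ret = kthread_stop(dev->task);	returns my_thread_fn()'s value
 */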

/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing is defined after one work
 * finishes and before the next one is started.
 *
 * Also, a work must not be handled by more than one worker at the same time;
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point.  The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
		 * the current to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
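
/*
 * Example (illustrative sketch, not part of this file): the legacy pattern
 * the FIXME above refers to, where the caller spawns the worker thread
 * itself instead of using kthread_create_worker().
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *task;
 *
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 */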

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
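
/*
 * Example (illustrative sketch, not part of this file): the common worker
 * lifecycle with this API. my_work_fn is hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	struct kthread_worker *worker;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_queue_work(worker, &my_work);
 *	...
 *	kthread_destroy_worker(worker);
 */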

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * It is good practice to include the CPU number in the worker name as well.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 *    - The worker might not exist if the CPU was offline when the user
 *      created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being canceled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * This callback has the prototype required by struct timer_list. It is
 * called from an irqsafe timer with interrupts already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used the wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending, meaning that
 * either the timer was running or the work was queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
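
/*
 * Example (illustrative sketch, not part of this file): deferring a work
 * item by one second and canceling it on teardown. my_work_fn is
 * hypothetical and worker was created with kthread_create_worker().
 *
 *	static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	kthread_queue_delayed_work(worker, &my_dwork, HZ);
 *	...
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 */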

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
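
/*
 * Example (illustrative sketch, not part of this file): using
 * kthread_mod_delayed_work() to debounce events: every call pushes the
 * expiry out to one second from now, so my_work_fn runs once per burst
 * of events. worker and my_dwork are hypothetical.
 *
 *	void on_event(void)
 *	{
 *		kthread_mod_delayed_work(worker, &my_dwork, HZ);
 *	}
 */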

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller is responsible for queuing or canceling all delayed work items
 * before invoking this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->delayed_work_list));
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm is set, and will not issue an IPI. Membarrier requires
	 * a memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm is cleared, and will not issue an IPI. Membarrier
	 * requires a memory barrier after accessing user-space memory,
	 * before clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
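
/*
 * Example (illustrative sketch, not part of this file): a kthread doing
 * work on behalf of a user process, in the style of vhost. The kthread is
 * assumed to hold its own reference on the mm (e.g. taken with mmget()
 * when the request was submitted); dev->mm, uaddr and val are hypothetical.
 *
 *	kthread_use_mm(dev->mm);
 *	if (copy_from_user(&val, uaddr, sizeof(val)))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(dev->mm);
 */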

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif