1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4  *   Copyright (C) 2009 Red Hat, Inc.
5  *
6  * Creation is done via kthreadd, so that we get a clean environment
7  * even if we're invoked from userspace (think modprobe, hotplug cpu,
8  * etc.).
9  */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32 
33 
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37 
38 struct kthread_create_info
39 {
40 	/* Information passed to kthread() from kthreadd. */
41 	int (*threadfn)(void *data);
42 	void *data;
43 	int node;
44 
45 	/* Result passed back to kthread_create() from kthreadd. */
46 	struct task_struct *result;
47 	struct completion *done;
48 
49 	struct list_head list;
50 };
51 
52 struct kthread {
53 	unsigned long flags;
54 	unsigned int cpu;
55 	int (*threadfn)(void *);
56 	void *data;
57 	mm_segment_t oldfs;
58 	struct completion parked;
59 	struct completion exited;
60 #ifdef CONFIG_BLK_CGROUP
61 	struct cgroup_subsys_state *blkcg_css;
62 #endif
63 	/* To store the full name if task comm is truncated. */
64 	char *full_name;
65 };
66 
67 enum KTHREAD_BITS {
68 	KTHREAD_IS_PER_CPU = 0,
69 	KTHREAD_SHOULD_STOP,
70 	KTHREAD_SHOULD_PARK,
71 };
72 
73 static inline struct kthread *to_kthread(struct task_struct *k)
74 {
75 	WARN_ON(!(k->flags & PF_KTHREAD));
76 	return (__force void *)k->set_child_tid;
77 }
78 
79 /*
80  * Variant of to_kthread() that doesn't assume @p is a kthread.
81  *
82  * Per construction; when:
83  *
84  *   (p->flags & PF_KTHREAD) && p->set_child_tid
85  *
86  * the task is both a kthread and struct kthread is persistent. However
87  * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
88  * begin_new_exec()).
89  */
90 static inline struct kthread *__to_kthread(struct task_struct *p)
91 {
92 	void *kthread = (__force void *)p->set_child_tid;
93 	if (kthread && !(p->flags & PF_KTHREAD))
94 		kthread = NULL;
95 	return kthread;
96 }
97 
98 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
99 {
100 	struct kthread *kthread = to_kthread(tsk);
101 
102 	if (!kthread || !kthread->full_name) {
103 		__get_task_comm(buf, buf_size, tsk);
104 		return;
105 	}
106 
107 	strscpy_pad(buf, kthread->full_name, buf_size);
108 }
109 
110 void set_kthread_struct(struct task_struct *p)
111 {
112 	struct kthread *kthread;
113 
114 	if (__to_kthread(p))
115 		return;
116 
117 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
118 	/*
119 	 * We abuse ->set_child_tid to avoid the new member and because it
120 	 * can't be wrongly copied by copy_process(). We also rely on the fact
121 	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
122 	 */
123 	p->set_child_tid = (__force void __user *)kthread;
124 }
125 
126 void free_kthread_struct(struct task_struct *k)
127 {
128 	struct kthread *kthread;
129 
130 	/*
131 	 * Can be NULL if this kthread was created by kernel_thread()
132 	 * or if kmalloc() in kthread() failed.
133 	 */
134 	kthread = to_kthread(k);
135 	if (!kthread)
136 		return;
137 
138 #ifdef CONFIG_BLK_CGROUP
139 	WARN_ON_ONCE(kthread->blkcg_css);
140 #endif
141 	kfree(kthread->full_name);
142 	kfree(kthread);
143 }
144 
145 /**
146  * kthread_should_stop - should this kthread return now?
147  *
148  * When someone calls kthread_stop() on your kthread, it will be woken
149  * and this will return true.  You should then return, and your return
150  * value will be passed through to kthread_stop().
151  */
152 bool kthread_should_stop(void)
153 {
154 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
155 }
156 EXPORT_SYMBOL(kthread_should_stop);
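/*
 * Example (illustrative sketch, not taken from in-tree code): a typical
 * thread function polls kthread_should_stop() in its main loop and returns
 * once it becomes true; the return value is handed back to kthread_stop().
 * my_thread_fn, my_data and do_some_work() are hypothetical names.
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		struct my_data *d = arg;
 *
 *		while (!kthread_should_stop()) {
 *			do_some_work(d);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */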
157 
158 bool __kthread_should_park(struct task_struct *k)
159 {
160 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
161 }
162 EXPORT_SYMBOL_GPL(__kthread_should_park);
163 
164 /**
165  * kthread_should_park - should this kthread park now?
166  *
167  * When someone calls kthread_park() on your kthread, it will be woken
168  * and this will return true.  You should then do the necessary
169  * cleanup and call kthread_parkme()
170  *
171  * Similar to kthread_should_stop(), but this keeps the thread alive
172  * and in a park position. kthread_unpark() "restarts" the thread and
173  * calls the thread function again.
174  */
175 bool kthread_should_park(void)
176 {
177 	return __kthread_should_park(current);
178 }
179 EXPORT_SYMBOL_GPL(kthread_should_park);
180 
181 /**
182  * kthread_freezable_should_stop - should this freezable kthread return now?
183  * @was_frozen: optional out parameter, indicates whether %current was frozen
184  *
185  * kthread_should_stop() for freezable kthreads, which will enter
186  * refrigerator if necessary.  This function is safe from kthread_stop() /
187  * freezer deadlock and freezable kthreads should use this function instead
188  * of calling try_to_freeze() directly.
189  */
190 bool kthread_freezable_should_stop(bool *was_frozen)
191 {
192 	bool frozen = false;
193 
194 	might_sleep();
195 
196 	if (unlikely(freezing(current)))
197 		frozen = __refrigerator(true);
198 
199 	if (was_frozen)
200 		*was_frozen = frozen;
201 
202 	return kthread_should_stop();
203 }
204 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
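/*
 * Example (illustrative sketch, not taken from in-tree code): a freezable
 * kthread marks itself with set_freezable() and then uses
 * kthread_freezable_should_stop() as its loop condition. my_work() and
 * my_freezable_thread are hypothetical names.
 *
 *	static int my_freezable_thread(void *arg)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (!was_frozen)
 *				my_work(arg);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */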
205 
206 /**
207  * kthread_func - return the function specified on kthread creation
208  * @task: kthread task in question
209  *
210  * Returns NULL if the task is not a kthread.
211  */
212 void *kthread_func(struct task_struct *task)
213 {
214 	struct kthread *kthread = __to_kthread(task);
215 	if (kthread)
216 		return kthread->threadfn;
217 	return NULL;
218 }
219 EXPORT_SYMBOL_GPL(kthread_func);
220 
221 /**
222  * kthread_data - return data value specified on kthread creation
223  * @task: kthread task in question
224  *
225  * Return the data value specified when kthread @task was created.
226  * The caller is responsible for ensuring the validity of @task when
227  * calling this function.
228  */
229 void *kthread_data(struct task_struct *task)
230 {
231 	return to_kthread(task)->data;
232 }
233 EXPORT_SYMBOL_GPL(kthread_data);
234 
235 /**
236  * kthread_probe_data - speculative version of kthread_data()
237  * @task: possible kthread task in question
238  *
239  * @task could be a kthread task.  Return the data value specified when it
240  * was created if accessible.  If @task isn't a kthread task or its data is
241  * inaccessible for any reason, %NULL is returned.  This function requires
242  * that @task itself is safe to dereference.
243  */
244 void *kthread_probe_data(struct task_struct *task)
245 {
246 	struct kthread *kthread = __to_kthread(task);
247 	void *data = NULL;
248 
249 	if (kthread)
250 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
251 	return data;
252 }
253 
254 static void __kthread_parkme(struct kthread *self)
255 {
256 	for (;;) {
257 		/*
258 		 * TASK_PARKED is a special state; we must serialize against
259 		 * possible pending wakeups to avoid store-store collisions on
260 		 * task->state.
261 		 *
262 		 * Such a collision might possibly result in the task state
263 		 * changing from TASK_PARKED and us failing the
264 		 * wait_task_inactive() in kthread_park().
265 		 */
266 		set_special_state(TASK_PARKED);
267 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
268 			break;
269 
270 		/*
271 		 * Thread is going to call schedule(), do not preempt it,
272 		 * or the caller of kthread_park() may spend more time in
273 		 * wait_task_inactive().
274 		 */
275 		preempt_disable();
276 		complete(&self->parked);
277 		schedule_preempt_disabled();
278 		preempt_enable();
279 	}
280 	__set_current_state(TASK_RUNNING);
281 }
282 
283 void kthread_parkme(void)
284 {
285 	__kthread_parkme(to_kthread(current));
286 }
287 EXPORT_SYMBOL_GPL(kthread_parkme);
288 
289 static int kthread(void *_create)
290 {
291 	/* Copy data: it's on kthread's stack */
292 	struct kthread_create_info *create = _create;
293 	int (*threadfn)(void *data) = create->threadfn;
294 	void *data = create->data;
295 	struct completion *done;
296 	struct kthread *self;
297 	int ret;
298 
299 	set_kthread_struct(current);
300 	self = to_kthread(current);
301 
302 	/* If user was SIGKILLed, I release the structure. */
303 	done = xchg(&create->done, NULL);
304 	if (!done) {
305 		kfree(create);
306 		do_exit(-EINTR);
307 	}
308 
309 	if (!self) {
310 		create->result = ERR_PTR(-ENOMEM);
311 		complete(done);
312 		do_exit(-ENOMEM);
313 	}
314 
315 	self->threadfn = threadfn;
316 	self->data = data;
317 	init_completion(&self->exited);
318 	init_completion(&self->parked);
319 	current->vfork_done = &self->exited;
320 
321 	/* OK, tell user we're spawned, wait for stop or wakeup */
322 	__set_current_state(TASK_UNINTERRUPTIBLE);
323 	create->result = current;
324 	/*
325 	 * Thread is going to call schedule(), do not preempt it,
326 	 * or the creator may spend more time in wait_task_inactive().
327 	 */
328 	preempt_disable();
329 	complete(done);
330 	schedule_preempt_disabled();
331 	preempt_enable();
332 
333 	ret = -EINTR;
334 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
335 		cgroup_kthread_ready();
336 		__kthread_parkme(self);
337 		ret = threadfn(data);
338 	}
339 	do_exit(ret);
340 }
341 
342 /* Called from kernel_clone() to get node information for the task about to be created. */
343 int tsk_fork_get_node(struct task_struct *tsk)
344 {
345 #ifdef CONFIG_NUMA
346 	if (tsk == kthreadd_task)
347 		return tsk->pref_node_fork;
348 #endif
349 	return NUMA_NO_NODE;
350 }
351 
352 static void create_kthread(struct kthread_create_info *create)
353 {
354 	int pid;
355 
356 #ifdef CONFIG_NUMA
357 	current->pref_node_fork = create->node;
358 #endif
359 	/* We want our own signal handler (we take no signals by default). */
360 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
361 	if (pid < 0) {
362 		/* If user was SIGKILLed, I release the structure. */
363 		struct completion *done = xchg(&create->done, NULL);
364 
365 		if (!done) {
366 			kfree(create);
367 			return;
368 		}
369 		create->result = ERR_PTR(pid);
370 		complete(done);
371 	}
372 }
373 
374 static __printf(4, 0)
375 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
376 						    void *data, int node,
377 						    const char namefmt[],
378 						    va_list args)
379 {
380 	DECLARE_COMPLETION_ONSTACK(done);
381 	struct task_struct *task;
382 	struct kthread_create_info *create = kmalloc(sizeof(*create),
383 						     GFP_KERNEL);
384 
385 	if (!create)
386 		return ERR_PTR(-ENOMEM);
387 	create->threadfn = threadfn;
388 	create->data = data;
389 	create->node = node;
390 	create->done = &done;
391 
392 	spin_lock(&kthread_create_lock);
393 	list_add_tail(&create->list, &kthread_create_list);
394 	spin_unlock(&kthread_create_lock);
395 
396 	wake_up_process(kthreadd_task);
397 	/*
398 	 * Wait for completion in killable state, for I might be chosen by
399 	 * the OOM killer while kthreadd is trying to allocate memory for
400 	 * new kernel thread.
401 	 */
402 	if (unlikely(wait_for_completion_killable(&done))) {
403 		/*
404 		 * If I was SIGKILLed before kthreadd (or new kernel thread)
405 		 * calls complete(), leave the cleanup of this structure to
406 		 * that thread.
407 		 */
408 		if (xchg(&create->done, NULL))
409 			return ERR_PTR(-EINTR);
410 		/*
411 		 * kthreadd (or new kernel thread) will call complete()
412 		 * shortly.
413 		 */
414 		wait_for_completion(&done);
415 	}
416 	task = create->result;
417 	if (!IS_ERR(task)) {
418 		static const struct sched_param param = { .sched_priority = 0 };
419 		char name[TASK_COMM_LEN];
420 		va_list aq;
421 		int len;
422 
423 		/*
424 		 * task is already visible to other tasks, so updating
425 		 * COMM must be protected.
426 		 */
427 		va_copy(aq, args);
428 		len = vsnprintf(name, sizeof(name), namefmt, aq);
429 		va_end(aq);
430 		if (len >= TASK_COMM_LEN) {
431 			struct kthread *kthread = to_kthread(task);
432 
433 			/* leave it truncated when out of memory. */
434 			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
435 		}
436 		set_task_comm(task, name);
437 		/*
438 		 * root may have changed our (kthreadd's) priority or CPU mask.
439 		 * The kernel thread should not inherit these properties.
440 		 */
441 		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
442 		set_cpus_allowed_ptr(task,
443 				     housekeeping_cpumask(HK_FLAG_KTHREAD));
444 	}
445 	kfree(create);
446 	return task;
447 }
448 
449 /**
450  * kthread_create_on_node - create a kthread.
451  * @threadfn: the function to run until signal_pending(current).
452  * @data: data ptr for @threadfn.
453  * @node: task and thread structures for the thread are allocated on this node
454  * @namefmt: printf-style name for the thread.
455  *
456  * Description: This helper function creates and names a kernel
457  * thread.  The thread will be stopped: use wake_up_process() to start
458  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
459  * is affine to all CPUs.
460  *
461  * If thread is going to be bound on a particular cpu, give its node
462  * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
463  * When woken, the thread will run @threadfn() with @data as its
464  * argument. @threadfn() can either call do_exit() directly if it is a
465  * standalone thread for which no one will call kthread_stop(), or
466  * return when 'kthread_should_stop()' is true (which means
467  * kthread_stop() has been called).  The return value should be zero
468  * or a negative error number; it will be passed to kthread_stop().
469  *
470  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
471  */
472 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
473 					   void *data, int node,
474 					   const char namefmt[],
475 					   ...)
476 {
477 	struct task_struct *task;
478 	va_list args;
479 
480 	va_start(args, namefmt);
481 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
482 	va_end(args);
483 
484 	return task;
485 }
486 EXPORT_SYMBOL(kthread_create_on_node);
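/*
 * Example (illustrative sketch, not taken from in-tree code): creating a
 * thread and starting it explicitly. Callers that do not care about the NUMA
 * node typically use the kthread_create()/kthread_run() wrappers from
 * <linux/kthread.h> instead. my_thread_fn, my_data and id are hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				     "my_worker/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);	// the new thread is created stopped
 */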
487 
488 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
489 {
490 	unsigned long flags;
491 
492 	if (!wait_task_inactive(p, state)) {
493 		WARN_ON(1);
494 		return;
495 	}
496 
497 	/* It's safe because the task is inactive. */
498 	raw_spin_lock_irqsave(&p->pi_lock, flags);
499 	do_set_cpus_allowed(p, mask);
500 	p->flags |= PF_NO_SETAFFINITY;
501 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
502 }
503 
504 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
505 {
506 	__kthread_bind_mask(p, cpumask_of(cpu), state);
507 }
508 
509 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
510 {
511 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
512 }
513 EXPORT_SYMBOL_GPL(kthread_bind_mask);
514 
515 /**
516  * kthread_bind - bind a just-created kthread to a cpu.
517  * @p: thread created by kthread_create().
518  * @cpu: cpu (might not be online, must be possible) for @k to run on.
519  *
520  * Description: This function is equivalent to set_cpus_allowed(),
521  * except that @cpu doesn't need to be online, and the thread must be
522  * stopped (i.e., just returned from kthread_create()).
523  */
524 void kthread_bind(struct task_struct *p, unsigned int cpu)
525 {
526 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
527 }
528 EXPORT_SYMBOL(kthread_bind);
529 
530 /**
531  * kthread_create_on_cpu - Create a cpu bound kthread
532  * @threadfn: the function to run until signal_pending(current).
533  * @data: data ptr for @threadfn.
534  * @cpu: The cpu on which the thread should be bound,
535  * @namefmt: printf-style name for the thread. Format is restricted
536  *	     to "name.*%u". Code fills in cpu number.
537  *
538  * Description: This helper function creates and names a kernel thread
539  */
540 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
541 					  void *data, unsigned int cpu,
542 					  const char *namefmt)
543 {
544 	struct task_struct *p;
545 
546 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
547 				   cpu);
548 	if (IS_ERR(p))
549 		return p;
550 	kthread_bind(p, cpu);
551 	/* CPU hotplug needs to bind once again when unparking the thread. */
552 	to_kthread(p)->cpu = cpu;
553 	return p;
554 }
555 EXPORT_SYMBOL(kthread_create_on_cpu);
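/*
 * Example (illustrative sketch, not taken from in-tree code): creating one
 * bound thread per possible CPU. Note that @namefmt is expected to contain a
 * "%u" which this helper fills with the CPU number. my_cpu_thread_fn and
 * my_threads[] are hypothetical.
 *
 *	for_each_possible_cpu(cpu) {
 *		struct task_struct *p;
 *
 *		p = kthread_create_on_cpu(my_cpu_thread_fn, NULL, cpu,
 *					  "my_helper/%u");
 *		if (IS_ERR(p))
 *			return PTR_ERR(p);
 *		my_threads[cpu] = p;
 *	}
 *
 * In-tree users usually go through the smpboot per-CPU thread infrastructure
 * (smpboot_register_percpu_thread()) rather than open-coding this.
 */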
556 
557 void kthread_set_per_cpu(struct task_struct *k, int cpu)
558 {
559 	struct kthread *kthread = to_kthread(k);
560 	if (!kthread)
561 		return;
562 
563 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
564 
565 	if (cpu < 0) {
566 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
567 		return;
568 	}
569 
570 	kthread->cpu = cpu;
571 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
572 }
573 
574 bool kthread_is_per_cpu(struct task_struct *p)
575 {
576 	struct kthread *kthread = __to_kthread(p);
577 	if (!kthread)
578 		return false;
579 
580 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
581 }
582 
583 /**
584  * kthread_unpark - unpark a thread created by kthread_create().
585  * @k:		thread created by kthread_create().
586  *
587  * Sets kthread_should_park() for @k to return false, wakes it, and
588  * waits for it to return. If the thread is marked percpu then it is
589  * bound to the cpu again.
590  */
591 void kthread_unpark(struct task_struct *k)
592 {
593 	struct kthread *kthread = to_kthread(k);
594 
595 	/*
596 	 * Newly created kthread was parked when the CPU was offline.
597 	 * The binding was lost and we need to set it again.
598 	 */
599 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
600 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
601 
602 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
603 	/*
604 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
605 	 */
606 	wake_up_state(k, TASK_PARKED);
607 }
608 EXPORT_SYMBOL_GPL(kthread_unpark);
609 
610 /**
611  * kthread_park - park a thread created by kthread_create().
612  * @k: thread created by kthread_create().
613  *
614  * Sets kthread_should_park() for @k to return true, wakes it, and
615  * waits for it to return. This can also be called after kthread_create()
616  * instead of calling wake_up_process(): the thread will park without
617  * calling threadfn().
618  *
619  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
620  * If called by the kthread itself just the park bit is set.
621  */
622 int kthread_park(struct task_struct *k)
623 {
624 	struct kthread *kthread = to_kthread(k);
625 
626 	if (WARN_ON(k->flags & PF_EXITING))
627 		return -ENOSYS;
628 
629 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
630 		return -EBUSY;
631 
632 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
633 	if (k != current) {
634 		wake_up_process(k);
635 		/*
636 		 * Wait for __kthread_parkme() to complete(), this means we
637 		 * _will_ have TASK_PARKED and are about to call schedule().
638 		 */
639 		wait_for_completion(&kthread->parked);
640 		/*
641 		 * Now wait for that schedule() to complete and the task to
642 		 * get scheduled out.
643 		 */
644 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
645 	}
646 
647 	return 0;
648 }
649 EXPORT_SYMBOL_GPL(kthread_park);
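/*
 * Example (illustrative sketch, not taken from in-tree code): a parking-aware
 * thread function checks kthread_should_park() and calls kthread_parkme();
 * the controlling code can then do its transition and later resume the thread
 * with kthread_unpark(). my_thread_fn and do_unit_of_work() are hypothetical.
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_unit_of_work(arg);
 *		}
 *		return 0;
 *	}
 *
 *	// controller side
 *	kthread_park(tsk);	// returns with tsk sitting in TASK_PARKED
 *	// do the transition that required the thread to be quiescent
 *	kthread_unpark(tsk);
 */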
650 
651 /**
652  * kthread_stop - stop a thread created by kthread_create().
653  * @k: thread created by kthread_create().
654  *
655  * Sets kthread_should_stop() for @k to return true, wakes it, and
656  * waits for it to exit. This can also be called after kthread_create()
657  * instead of calling wake_up_process(): the thread will exit without
658  * calling threadfn().
659  *
660  * If threadfn() may call do_exit() itself, the caller must ensure
661  * task_struct can't go away.
662  *
663  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
664  * was never called.
665  */
666 int kthread_stop(struct task_struct *k)
667 {
668 	struct kthread *kthread;
669 	int ret;
670 
671 	trace_sched_kthread_stop(k);
672 
673 	get_task_struct(k);
674 	kthread = to_kthread(k);
675 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
676 	kthread_unpark(k);
677 	wake_up_process(k);
678 	wait_for_completion(&kthread->exited);
679 	ret = k->exit_code;
680 	put_task_struct(k);
681 
682 	trace_sched_kthread_stop_ret(ret);
683 	return ret;
684 }
685 EXPORT_SYMBOL(kthread_stop);
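/*
 * Example (illustrative sketch, not taken from in-tree code): a typical
 * shutdown path. The task_struct pointer saved at creation time must still be
 * valid here; kthread_stop() returns the thread function's return value, or
 * -EINTR if the thread was never woken. my->task is a hypothetical field.
 *
 *	if (my->task) {
 *		int err = kthread_stop(my->task);
 *
 *		if (err && err != -EINTR)
 *			pr_warn("worker exited with %d\n", err);
 *		my->task = NULL;
 *	}
 */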
686 
687 int kthreadd(void *unused)
688 {
689 	struct task_struct *tsk = current;
690 
691 	/* Setup a clean context for our children to inherit. */
692 	set_task_comm(tsk, "kthreadd");
693 	ignore_signals(tsk);
694 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
695 	set_mems_allowed(node_states[N_MEMORY]);
696 
697 	current->flags |= PF_NOFREEZE;
698 	cgroup_init_kthreadd();
699 
700 	for (;;) {
701 		set_current_state(TASK_INTERRUPTIBLE);
702 		if (list_empty(&kthread_create_list))
703 			schedule();
704 		__set_current_state(TASK_RUNNING);
705 
706 		spin_lock(&kthread_create_lock);
707 		while (!list_empty(&kthread_create_list)) {
708 			struct kthread_create_info *create;
709 
710 			create = list_entry(kthread_create_list.next,
711 					    struct kthread_create_info, list);
712 			list_del_init(&create->list);
713 			spin_unlock(&kthread_create_lock);
714 
715 			create_kthread(create);
716 
717 			spin_lock(&kthread_create_lock);
718 		}
719 		spin_unlock(&kthread_create_lock);
720 	}
721 
722 	return 0;
723 }
724 
725 void __kthread_init_worker(struct kthread_worker *worker,
726 				const char *name,
727 				struct lock_class_key *key)
728 {
729 	memset(worker, 0, sizeof(struct kthread_worker));
730 	raw_spin_lock_init(&worker->lock);
731 	lockdep_set_class_and_name(&worker->lock, key, name);
732 	INIT_LIST_HEAD(&worker->work_list);
733 	INIT_LIST_HEAD(&worker->delayed_work_list);
734 }
735 EXPORT_SYMBOL_GPL(__kthread_init_worker);
736 
737 /**
738  * kthread_worker_fn - kthread function to process kthread_worker
739  * @worker_ptr: pointer to initialized kthread_worker
740  *
741  * This function implements the main cycle of kthread worker. It processes
742  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
743  * is empty.
744  *
745  * The works are not allowed to keep any locks, disable preemption or interrupts
746  * when they finish. A safe point for freezing is defined after one work
747  * finishes and before a new one is started.
748  *
749  * Also the works must not be handled by more than one worker at the same time,
750  * see also kthread_queue_work().
751  */
752 int kthread_worker_fn(void *worker_ptr)
753 {
754 	struct kthread_worker *worker = worker_ptr;
755 	struct kthread_work *work;
756 
757 	/*
758 	 * FIXME: Update the check and remove the assignment when all kthread
759 	 * worker users are created using kthread_create_worker*() functions.
760 	 */
761 	WARN_ON(worker->task && worker->task != current);
762 	worker->task = current;
763 
764 	if (worker->flags & KTW_FREEZABLE)
765 		set_freezable();
766 
767 repeat:
768 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
769 
770 	if (kthread_should_stop()) {
771 		__set_current_state(TASK_RUNNING);
772 		raw_spin_lock_irq(&worker->lock);
773 		worker->task = NULL;
774 		raw_spin_unlock_irq(&worker->lock);
775 		return 0;
776 	}
777 
778 	work = NULL;
779 	raw_spin_lock_irq(&worker->lock);
780 	if (!list_empty(&worker->work_list)) {
781 		work = list_first_entry(&worker->work_list,
782 					struct kthread_work, node);
783 		list_del_init(&work->node);
784 	}
785 	worker->current_work = work;
786 	raw_spin_unlock_irq(&worker->lock);
787 
788 	if (work) {
789 		kthread_work_func_t func = work->func;
790 		__set_current_state(TASK_RUNNING);
791 		trace_sched_kthread_work_execute_start(work);
792 		work->func(work);
793 		/*
794 		 * Avoid dereferencing work after this point.  The trace
795 		 * event only cares about the address.
796 		 */
797 		trace_sched_kthread_work_execute_end(work, func);
798 	} else if (!freezing(current))
799 		schedule();
800 
801 	try_to_freeze();
802 	cond_resched();
803 	goto repeat;
804 }
805 EXPORT_SYMBOL_GPL(kthread_worker_fn);
806 
807 static __printf(3, 0) struct kthread_worker *
808 __kthread_create_worker(int cpu, unsigned int flags,
809 			const char namefmt[], va_list args)
810 {
811 	struct kthread_worker *worker;
812 	struct task_struct *task;
813 	int node = NUMA_NO_NODE;
814 
815 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
816 	if (!worker)
817 		return ERR_PTR(-ENOMEM);
818 
819 	kthread_init_worker(worker);
820 
821 	if (cpu >= 0)
822 		node = cpu_to_node(cpu);
823 
824 	task = __kthread_create_on_node(kthread_worker_fn, worker,
825 						node, namefmt, args);
826 	if (IS_ERR(task))
827 		goto fail_task;
828 
829 	if (cpu >= 0)
830 		kthread_bind(task, cpu);
831 
832 	worker->flags = flags;
833 	worker->task = task;
834 	wake_up_process(task);
835 	return worker;
836 
837 fail_task:
838 	kfree(worker);
839 	return ERR_CAST(task);
840 }
841 
842 /**
843  * kthread_create_worker - create a kthread worker
844  * @flags: flags modifying the default behavior of the worker
845  * @namefmt: printf-style name for the kthread worker (task).
846  *
847  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
848  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
849  * when the worker was SIGKILLed.
850  */
851 struct kthread_worker *
852 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
853 {
854 	struct kthread_worker *worker;
855 	va_list args;
856 
857 	va_start(args, namefmt);
858 	worker = __kthread_create_worker(-1, flags, namefmt, args);
859 	va_end(args);
860 
861 	return worker;
862 }
863 EXPORT_SYMBOL(kthread_create_worker);
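/*
 * Example (illustrative sketch, not taken from in-tree code): the usual
 * kthread_worker flow is to create the worker, initialize each kthread_work
 * with its callback, and queue works from any context. my_work_fn, my_ctx,
 * process() and ctx are hypothetical names.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		process(ctx);
 *	}
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 */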
864 
865 /**
866  * kthread_create_worker_on_cpu - create a kthread worker and bind it
867  *	to a given CPU and the associated NUMA node.
868  * @cpu: CPU number
869  * @flags: flags modifying the default behavior of the worker
870  * @namefmt: printf-style name for the kthread worker (task).
871  *
872  * Use a valid CPU number if you want to bind the kthread worker
873  * to the given CPU and the associated NUMA node.
874  *
875  * A good practice is to add the cpu number also into the worker name.
876  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
877  *
878  * CPU hotplug:
879  * The kthread worker API is simple and generic. It just provides a way
880  * to create, use, and destroy workers.
881  *
882  * It is up to the API user how to handle CPU hotplug. They have to decide
883  * how to handle pending work items, prevent queuing new ones, and
884  * restore the functionality when the CPU goes off and on. There are a
885  * few catches:
886  *
887  *    - CPU affinity gets lost when it is scheduled on an offline CPU.
888  *
889  *    - The worker might not exist if the CPU was offline when the user
890  *      created the workers.
891  *
892  * Good practice is to implement two CPU hotplug callbacks and to
893  * destroy/create the worker when the CPU goes down/up.
894  *
895  * Return:
896  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
897  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
898  * when the worker was SIGKILLed.
899  */
900 struct kthread_worker *
901 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
902 			     const char namefmt[], ...)
903 {
904 	struct kthread_worker *worker;
905 	va_list args;
906 
907 	va_start(args, namefmt);
908 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
909 	va_end(args);
910 
911 	return worker;
912 }
913 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
914 
915 /*
916  * Returns true when the work could not be queued at the moment.
917  * It happens when it is already pending in a worker list
918  * or when it is being cancelled.
919  */
920 static inline bool queuing_blocked(struct kthread_worker *worker,
921 				   struct kthread_work *work)
922 {
923 	lockdep_assert_held(&worker->lock);
924 
925 	return !list_empty(&work->node) || work->canceling;
926 }
927 
928 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
929 					     struct kthread_work *work)
930 {
931 	lockdep_assert_held(&worker->lock);
932 	WARN_ON_ONCE(!list_empty(&work->node));
933 	/* Do not use a work with >1 worker, see kthread_queue_work() */
934 	WARN_ON_ONCE(work->worker && work->worker != worker);
935 }
936 
937 /* insert @work before @pos in @worker */
938 static void kthread_insert_work(struct kthread_worker *worker,
939 				struct kthread_work *work,
940 				struct list_head *pos)
941 {
942 	kthread_insert_work_sanity_check(worker, work);
943 
944 	trace_sched_kthread_work_queue_work(worker, work);
945 
946 	list_add_tail(&work->node, pos);
947 	work->worker = worker;
948 	if (!worker->current_work && likely(worker->task))
949 		wake_up_process(worker->task);
950 }
951 
952 /**
953  * kthread_queue_work - queue a kthread_work
954  * @worker: target kthread_worker
955  * @work: kthread_work to queue
956  *
957  * Queue @work to work processor @worker for async execution.  @worker
958  * must have been created with kthread_create_worker().  Returns %true
959  * if @work was successfully queued, %false if it was already pending.
960  *
961  * Reinitialize the work if it needs to be used by another worker.
962  * For example, when the worker was stopped and started again.
963  */
964 bool kthread_queue_work(struct kthread_worker *worker,
965 			struct kthread_work *work)
966 {
967 	bool ret = false;
968 	unsigned long flags;
969 
970 	raw_spin_lock_irqsave(&worker->lock, flags);
971 	if (!queuing_blocked(worker, work)) {
972 		kthread_insert_work(worker, work, &worker->work_list);
973 		ret = true;
974 	}
975 	raw_spin_unlock_irqrestore(&worker->lock, flags);
976 	return ret;
977 }
978 EXPORT_SYMBOL_GPL(kthread_queue_work);
979 
980 /**
981  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
982  *	delayed work when the timer expires.
983  * @t: pointer to the expired timer
984  *
985  * The format of the function is defined by struct timer_list.
986  * It should have been called from irqsafe timer with irq already off.
987  */
988 void kthread_delayed_work_timer_fn(struct timer_list *t)
989 {
990 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
991 	struct kthread_work *work = &dwork->work;
992 	struct kthread_worker *worker = work->worker;
993 	unsigned long flags;
994 
995 	/*
996 	 * This might happen when a pending work is reinitialized.
997 	 * It means that it is being used the wrong way.
998 	 */
999 	if (WARN_ON_ONCE(!worker))
1000 		return;
1001 
1002 	raw_spin_lock_irqsave(&worker->lock, flags);
1003 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1004 	WARN_ON_ONCE(work->worker != worker);
1005 
1006 	/* Move the work from worker->delayed_work_list. */
1007 	WARN_ON_ONCE(list_empty(&work->node));
1008 	list_del_init(&work->node);
1009 	if (!work->canceling)
1010 		kthread_insert_work(worker, work, &worker->work_list);
1011 
1012 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1013 }
1014 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1015 
1016 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1017 					 struct kthread_delayed_work *dwork,
1018 					 unsigned long delay)
1019 {
1020 	struct timer_list *timer = &dwork->timer;
1021 	struct kthread_work *work = &dwork->work;
1022 
1023 	WARN_ON_FUNCTION_MISMATCH(timer->function,
1024 				  kthread_delayed_work_timer_fn);
1025 
1026 	/*
1027 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1028 	 * both optimization and correctness.  The earliest @timer can
1029 	 * expire is on the closest next tick and delayed_work users depend
1030 	 * on there being no such delay when @delay is 0.
1031 	 */
1032 	if (!delay) {
1033 		kthread_insert_work(worker, work, &worker->work_list);
1034 		return;
1035 	}
1036 
1037 	/* Be paranoid and try to detect possible races already now. */
1038 	kthread_insert_work_sanity_check(worker, work);
1039 
1040 	list_add(&work->node, &worker->delayed_work_list);
1041 	work->worker = worker;
1042 	timer->expires = jiffies + delay;
1043 	add_timer(timer);
1044 }
1045 
1046 /**
1047  * kthread_queue_delayed_work - queue the associated kthread work
1048  *	after a delay.
1049  * @worker: target kthread_worker
1050  * @dwork: kthread_delayed_work to queue
1051  * @delay: number of jiffies to wait before queuing
1052  *
1053  * If the work has not been pending it starts a timer that will queue
1054  * the work after the given @delay. If @delay is zero, it queues the
1055  * work immediately.
1056  *
1057  * Return: %false if the @work has already been pending. It means that
1058  * either the timer was running or the work was queued. It returns %true
1059  * otherwise.
1060  */
1061 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1062 				struct kthread_delayed_work *dwork,
1063 				unsigned long delay)
1064 {
1065 	struct kthread_work *work = &dwork->work;
1066 	unsigned long flags;
1067 	bool ret = false;
1068 
1069 	raw_spin_lock_irqsave(&worker->lock, flags);
1070 
1071 	if (!queuing_blocked(worker, work)) {
1072 		__kthread_queue_delayed_work(worker, dwork, delay);
1073 		ret = true;
1074 	}
1075 
1076 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1077 	return ret;
1078 }
1079 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
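/*
 * Example (illustrative sketch, not taken from in-tree code): delayed works
 * are declared as struct kthread_delayed_work, initialized with
 * kthread_init_delayed_work(), and armed with a delay in jiffies.
 * my_timeout_fn, ctx and worker are hypothetical names.
 *
 *	kthread_init_delayed_work(&ctx->dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &ctx->dwork,
 *				   msecs_to_jiffies(100));
 */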
1080 
1081 struct kthread_flush_work {
1082 	struct kthread_work	work;
1083 	struct completion	done;
1084 };
1085 
1086 static void kthread_flush_work_fn(struct kthread_work *work)
1087 {
1088 	struct kthread_flush_work *fwork =
1089 		container_of(work, struct kthread_flush_work, work);
1090 	complete(&fwork->done);
1091 }
1092 
1093 /**
1094  * kthread_flush_work - flush a kthread_work
1095  * @work: work to flush
1096  *
1097  * If @work is queued or executing, wait for it to finish execution.
1098  */
1099 void kthread_flush_work(struct kthread_work *work)
1100 {
1101 	struct kthread_flush_work fwork = {
1102 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1103 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1104 	};
1105 	struct kthread_worker *worker;
1106 	bool noop = false;
1107 
1108 	worker = work->worker;
1109 	if (!worker)
1110 		return;
1111 
1112 	raw_spin_lock_irq(&worker->lock);
1113 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1114 	WARN_ON_ONCE(work->worker != worker);
1115 
1116 	if (!list_empty(&work->node))
1117 		kthread_insert_work(worker, &fwork.work, work->node.next);
1118 	else if (worker->current_work == work)
1119 		kthread_insert_work(worker, &fwork.work,
1120 				    worker->work_list.next);
1121 	else
1122 		noop = true;
1123 
1124 	raw_spin_unlock_irq(&worker->lock);
1125 
1126 	if (!noop)
1127 		wait_for_completion(&fwork.done);
1128 }
1129 EXPORT_SYMBOL_GPL(kthread_flush_work);
1130 
1131 /*
1132  * Make sure that the timer is neither set nor running and could
1133  * not manipulate the work list_head any longer.
1134  *
1135  * The function is called under worker->lock. The lock is temporarily
1136  * released, but the timer can't be set again in the meantime.
1137  */
1138 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1139 					      unsigned long *flags)
1140 {
1141 	struct kthread_delayed_work *dwork =
1142 		container_of(work, struct kthread_delayed_work, work);
1143 	struct kthread_worker *worker = work->worker;
1144 
1145 	/*
1146 	 * del_timer_sync() must be called to make sure that the timer
1147 	 * callback is not running. The lock must be temporarily released
1148 	 * to avoid a deadlock with the callback. In the meantime,
1149 	 * any queuing is blocked by setting the canceling counter.
1150 	 */
1151 	work->canceling++;
1152 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1153 	del_timer_sync(&dwork->timer);
1154 	raw_spin_lock_irqsave(&worker->lock, *flags);
1155 	work->canceling--;
1156 }
1157 
1158 /*
1159  * This function removes the work from the worker queue.
1160  *
1161  * It is called under worker->lock. The caller must make sure that
1162  * the timer used by delayed work is not running, e.g. by calling
1163  * kthread_cancel_delayed_work_timer().
1164  *
1165  * The work might still be in use when this function finishes. See the
1166  * current_work processed by the worker.
1167  *
1168  * Return: %true if @work was pending and successfully canceled,
1169  *	%false if @work was not pending
1170  */
1171 static bool __kthread_cancel_work(struct kthread_work *work)
1172 {
1173 	/*
1174 	 * Try to remove the work from a worker list. It might either
1175 	 * be from worker->work_list or from worker->delayed_work_list.
1176 	 */
1177 	if (!list_empty(&work->node)) {
1178 		list_del_init(&work->node);
1179 		return true;
1180 	}
1181 
1182 	return false;
1183 }
1184 
1185 /**
1186  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1187  * @worker: kthread worker to use
1188  * @dwork: kthread delayed work to queue
1189  * @delay: number of jiffies to wait before queuing
1190  *
1191  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1192  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1193  * @work is guaranteed to be queued immediately.
1194  *
1195  * Return: %false if @dwork was idle and queued, %true otherwise.
1196  *
1197  * A special case is when the work is being canceled in parallel.
1198  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1199  * or yet another kthread_mod_delayed_work() call. We let the other command
1200  * win and return %true here. The return value can be used for reference
1201  * counting and the number of queued works stays the same. Anyway, the caller
1202  * is supposed to synchronize these operations a reasonable way.
1203  *
1204  * This function is safe to call from any context including IRQ handler.
1205  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1206  * for details.
1207  */
1208 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1209 			      struct kthread_delayed_work *dwork,
1210 			      unsigned long delay)
1211 {
1212 	struct kthread_work *work = &dwork->work;
1213 	unsigned long flags;
1214 	int ret;
1215 
1216 	raw_spin_lock_irqsave(&worker->lock, flags);
1217 
1218 	/* Do not bother with canceling when never queued. */
1219 	if (!work->worker) {
1220 		ret = false;
1221 		goto fast_queue;
1222 	}
1223 
1224 	/* Work must not be used with >1 worker, see kthread_queue_work() */
1225 	WARN_ON_ONCE(work->worker != worker);
1226 
1227 	/*
1228 	 * Temporarily cancel the work, but do not fight with another command
1229 	 * that is canceling the work as well.
1230 	 *
1231 	 * It is a bit tricky because of possible races with another
1232 	 * mod_delayed_work() and cancel_delayed_work() callers.
1233 	 *
1234 	 * The timer must be canceled first because worker->lock is released
1235 	 * when doing so. But the work can be removed from the queue (list)
1236 	 * only when it can be queued again so that the return value can
1237 	 * be used for reference counting.
1238 	 */
1239 	kthread_cancel_delayed_work_timer(work, &flags);
1240 	if (work->canceling) {
1241 		/* The number of works in the queue does not change. */
1242 		ret = true;
1243 		goto out;
1244 	}
1245 	ret = __kthread_cancel_work(work);
1246 
1247 fast_queue:
1248 	__kthread_queue_delayed_work(worker, dwork, delay);
1249 out:
1250 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1251 	return ret;
1252 }
1253 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
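/*
 * Example (illustrative sketch, not taken from in-tree code):
 * kthread_mod_delayed_work() is commonly used to (re)arm a timeout so that it
 * fires @delay after the most recent event, whether or not the work was
 * already pending. ctx, timeout_work and MY_TIMEOUT are hypothetical.
 *
 *	// on every event, push the timeout out again
 *	kthread_mod_delayed_work(ctx->worker, &ctx->timeout_work,
 *				 MY_TIMEOUT);
 */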
1254 
1255 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1256 {
1257 	struct kthread_worker *worker = work->worker;
1258 	unsigned long flags;
1259 	int ret = false;
1260 
1261 	if (!worker)
1262 		goto out;
1263 
1264 	raw_spin_lock_irqsave(&worker->lock, flags);
1265 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1266 	WARN_ON_ONCE(work->worker != worker);
1267 
1268 	if (is_dwork)
1269 		kthread_cancel_delayed_work_timer(work, &flags);
1270 
1271 	ret = __kthread_cancel_work(work);
1272 
1273 	if (worker->current_work != work)
1274 		goto out_fast;
1275 
1276 	/*
1277 	 * The work is in progress and we need to wait with the lock released.
1278 	 * In the meantime, block any queuing by setting the canceling counter.
1279 	 */
1280 	work->canceling++;
1281 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1282 	kthread_flush_work(work);
1283 	raw_spin_lock_irqsave(&worker->lock, flags);
1284 	work->canceling--;
1285 
1286 out_fast:
1287 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1288 out:
1289 	return ret;
1290 }
1291 
1292 /**
1293  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1294  * @work: the kthread work to cancel
1295  *
1296  * Cancel @work and wait for its execution to finish.  This function
1297  * can be used even if the work re-queues itself. On return from this
1298  * function, @work is guaranteed to be not pending or executing on any CPU.
1299  *
1300  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1301  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1302  *
1303  * The caller must ensure that the worker on which @work was last
1304  * queued can't be destroyed before this function returns.
1305  *
1306  * Return: %true if @work was pending, %false otherwise.
1307  */
1308 bool kthread_cancel_work_sync(struct kthread_work *work)
1309 {
1310 	return __kthread_cancel_work_sync(work, false);
1311 }
1312 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1313 
1314 /**
1315  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1316  *	wait for it to finish.
1317  * @dwork: the kthread delayed work to cancel
1318  *
1319  * This is kthread_cancel_work_sync() for delayed works.
1320  *
1321  * Return: %true if @dwork was pending, %false otherwise.
1322  */
1323 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1324 {
1325 	return __kthread_cancel_work_sync(&dwork->work, true);
1326 }
1327 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1328 
1329 /**
1330  * kthread_flush_worker - flush all current works on a kthread_worker
1331  * @worker: worker to flush
1332  *
1333  * Wait until all currently executing or pending works on @worker are
1334  * finished.
1335  */
1336 void kthread_flush_worker(struct kthread_worker *worker)
1337 {
1338 	struct kthread_flush_work fwork = {
1339 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1340 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1341 	};
1342 
1343 	kthread_queue_work(worker, &fwork.work);
1344 	wait_for_completion(&fwork.done);
1345 }
1346 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1347 
1348 /**
1349  * kthread_destroy_worker - destroy a kthread worker
1350  * @worker: worker to be destroyed
1351  *
1352  * Flush and destroy @worker.  The simple flush is enough because the kthread
1353  * worker API is used only in trivial scenarios.  There are no multi-step state
1354  * machines needed.
1355  */
1356 void kthread_destroy_worker(struct kthread_worker *worker)
1357 {
1358 	struct task_struct *task;
1359 
1360 	task = worker->task;
1361 	if (WARN_ON(!task))
1362 		return;
1363 
1364 	kthread_flush_worker(worker);
1365 	kthread_stop(task);
1366 	WARN_ON(!list_empty(&worker->work_list));
1367 	kfree(worker);
1368 }
1369 EXPORT_SYMBOL(kthread_destroy_worker);
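/*
 * Example (illustrative sketch, not taken from in-tree code): a typical
 * teardown cancels or flushes outstanding works before destroying the worker;
 * queuing must already have stopped at this point. ctx is hypothetical.
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);
 *	kthread_cancel_work_sync(&ctx->work);
 *	kthread_destroy_worker(ctx->worker);	// flushes and stops the task
 */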
1370 
1371 /**
1372  * kthread_use_mm - make the calling kthread operate on an address space
1373  * @mm: address space to operate on
1374  */
1375 void kthread_use_mm(struct mm_struct *mm)
1376 {
1377 	struct mm_struct *active_mm;
1378 	struct task_struct *tsk = current;
1379 
1380 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1381 	WARN_ON_ONCE(tsk->mm);
1382 
1383 	task_lock(tsk);
1384 	/* Hold off tlb flush IPIs while switching mm's */
1385 	local_irq_disable();
1386 	active_mm = tsk->active_mm;
1387 	if (active_mm != mm) {
1388 		mmgrab(mm);
1389 		tsk->active_mm = mm;
1390 	}
1391 	tsk->mm = mm;
1392 	membarrier_update_current_mm(mm);
1393 	switch_mm_irqs_off(active_mm, mm, tsk);
1394 	local_irq_enable();
1395 	task_unlock(tsk);
1396 #ifdef finish_arch_post_lock_switch
1397 	finish_arch_post_lock_switch();
1398 #endif
1399 
1400 	/*
1401 	 * When a kthread starts operating on an address space, the loop
1402 	 * in membarrier_{private,global}_expedited() may not observe
1403 	 * the update to tsk->mm, and not issue an IPI. Membarrier requires a
1404 	 * memory barrier after storing to tsk->mm, before accessing
1405 	 * user-space memory. A full memory barrier for membarrier
1406 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1407 	 * mmdrop(), or explicitly with smp_mb().
1408 	 */
1409 	if (active_mm != mm)
1410 		mmdrop(active_mm);
1411 	else
1412 		smp_mb();
1413 
1414 	to_kthread(tsk)->oldfs = force_uaccess_begin();
1415 }
1416 EXPORT_SYMBOL_GPL(kthread_use_mm);
1417 
1418 /**
1419  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1420  * @mm: address space to operate on
1421  */
1422 void kthread_unuse_mm(struct mm_struct *mm)
1423 {
1424 	struct task_struct *tsk = current;
1425 
1426 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1427 	WARN_ON_ONCE(!tsk->mm);
1428 
1429 	force_uaccess_end(to_kthread(tsk)->oldfs);
1430 
1431 	task_lock(tsk);
1432 	/*
1433 	 * When a kthread stops operating on an address space, the loop
1434 	 * in membarrier_{private,global}_expedited() may not observe
1435 	 * the update to tsk->mm, and not issue an IPI. Membarrier requires a
1436 	 * memory barrier after accessing user-space memory, before
1437 	 * clearing tsk->mm.
1438 	 */
1439 	smp_mb__after_spinlock();
1440 	sync_mm_rss(mm);
1441 	local_irq_disable();
1442 	tsk->mm = NULL;
1443 	membarrier_update_current_mm(NULL);
1444 	/* active_mm is still 'mm' */
1445 	enter_lazy_tlb(mm, tsk);
1446 	local_irq_enable();
1447 	task_unlock(tsk);
1448 }
1449 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
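/*
 * Example (illustrative sketch, not taken from in-tree code): a kthread that
 * needs to access a user process' memory (a vhost-style worker, for instance)
 * brackets the access with kthread_use_mm()/kthread_unuse_mm(). The mm must
 * have been pinned earlier, e.g. with mmget(); mm, buf, uaddr and len are
 * hypothetical.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(buf, uaddr, len))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */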
1450 
1451 #ifdef CONFIG_BLK_CGROUP
1452 /**
1453  * kthread_associate_blkcg - associate blkcg to current kthread
1454  * @css: the cgroup info
1455  *
1456  * Current thread must be a kthread. The thread is running jobs on behalf of
1457  * other threads. In some cases, we expect the jobs to attach the cgroup info
1458  * of the original threads instead of that of the current thread. This function
1459  * stores the original thread's cgroup info in the current kthread context for later
1460  * retrieval.
1461  */
1462 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1463 {
1464 	struct kthread *kthread;
1465 
1466 	if (!(current->flags & PF_KTHREAD))
1467 		return;
1468 	kthread = to_kthread(current);
1469 	if (!kthread)
1470 		return;
1471 
1472 	if (kthread->blkcg_css) {
1473 		css_put(kthread->blkcg_css);
1474 		kthread->blkcg_css = NULL;
1475 	}
1476 	if (css) {
1477 		css_get(css);
1478 		kthread->blkcg_css = css;
1479 	}
1480 }
1481 EXPORT_SYMBOL(kthread_associate_blkcg);
1482 
1483 /**
1484  * kthread_blkcg - get associated blkcg css of current kthread
1485  *
1486  * Current thread must be a kthread.
1487  */
1488 struct cgroup_subsys_state *kthread_blkcg(void)
1489 {
1490 	struct kthread *kthread;
1491 
1492 	if (current->flags & PF_KTHREAD) {
1493 		kthread = to_kthread(current);
1494 		if (kthread)
1495 			return kthread->blkcg_css;
1496 	}
1497 	return NULL;
1498 }
1499 EXPORT_SYMBOL(kthread_blkcg);
1500 #endif
1501