1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32
33
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37
38 struct kthread_create_info
39 {
40 /* Information passed to kthread() from kthreadd. */
41 int (*threadfn)(void *data);
42 void *data;
43 int node;
44
45 /* Result passed back to kthread_create() from kthreadd. */
46 struct task_struct *result;
47 struct completion *done;
48
49 struct list_head list;
50 };
51
52 struct kthread {
53 unsigned long flags;
54 unsigned int cpu;
55 int result;
56 int (*threadfn)(void *);
57 void *data;
58 struct completion parked;
59 struct completion exited;
60 #ifdef CONFIG_BLK_CGROUP
61 struct cgroup_subsys_state *blkcg_css;
62 #endif
63 /* To store the full name if task comm is truncated. */
64 char *full_name;
65 };
66
67 enum KTHREAD_BITS {
68 KTHREAD_IS_PER_CPU = 0,
69 KTHREAD_SHOULD_STOP,
70 KTHREAD_SHOULD_PARK,
71 };
72
73 static inline struct kthread *to_kthread(struct task_struct *k)
74 {
75 WARN_ON(!(k->flags & PF_KTHREAD));
76 return k->worker_private;
77 }
78
79 /*
80 * Variant of to_kthread() that doesn't assume @p is a kthread.
81 *
82 * Per construction; when:
83 *
84 * (p->flags & PF_KTHREAD) && p->worker_private
85 *
86 * the task is both a kthread and struct kthread is persistent. However
87 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
88 * begin_new_exec()).
89 */
90 static inline struct kthread *__to_kthread(struct task_struct *p)
91 {
92 void *kthread = p->worker_private;
93 if (kthread && !(p->flags & PF_KTHREAD))
94 kthread = NULL;
95 return kthread;
96 }
97
98 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
99 {
100 struct kthread *kthread = to_kthread(tsk);
101
102 if (!kthread || !kthread->full_name) {
103 __get_task_comm(buf, buf_size, tsk);
104 return;
105 }
106
107 strscpy_pad(buf, kthread->full_name, buf_size);
108 }
109
110 bool set_kthread_struct(struct task_struct *p)
111 {
112 struct kthread *kthread;
113
114 if (WARN_ON_ONCE(to_kthread(p)))
115 return false;
116
117 kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
118 if (!kthread)
119 return false;
120
121 init_completion(&kthread->exited);
122 init_completion(&kthread->parked);
123 p->vfork_done = &kthread->exited;
124
125 p->worker_private = kthread;
126 return true;
127 }
128
129 void free_kthread_struct(struct task_struct *k)
130 {
131 struct kthread *kthread;
132
133 /*
134 * Can be NULL if kzalloc() in set_kthread_struct() failed.
135 */
136 kthread = to_kthread(k);
137 if (!kthread)
138 return;
139
140 #ifdef CONFIG_BLK_CGROUP
141 WARN_ON_ONCE(kthread->blkcg_css);
142 #endif
143 k->worker_private = NULL;
144 kfree(kthread->full_name);
145 kfree(kthread);
146 }
147
148 /**
149 * kthread_should_stop - should this kthread return now?
150 *
151 * When someone calls kthread_stop() on your kthread, it will be woken
152 * and this will return true. You should then return, and your return
153 * value will be passed through to kthread_stop().
154 */
155 bool kthread_should_stop(void)
156 {
157 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
158 }
159 EXPORT_SYMBOL(kthread_should_stop);
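
/*
 * Example (illustrative sketch; my_thread_fn(), struct my_ctx and
 * do_poll_work() are hypothetical names, not defined in this file): a
 * typical kthread main loop that runs until kthread_stop() is called
 * and whose return value is handed back to the stopper.
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		while (!kthread_should_stop()) {
 *			do_poll_work(ctx);
 *			schedule_timeout_interruptible(msecs_to_jiffies(100));
 *		}
 *		return 0;
 *	}
 */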
160
161 bool __kthread_should_park(struct task_struct *k)
162 {
163 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
164 }
165 EXPORT_SYMBOL_GPL(__kthread_should_park);
166
167 /**
168 * kthread_should_park - should this kthread park now?
169 *
170 * When someone calls kthread_park() on your kthread, it will be woken
171 * and this will return true. You should then do the necessary
172 * cleanup and call kthread_parkme()
173 *
174 * Similar to kthread_should_stop(), but this keeps the thread alive
175 * and in a park position. kthread_unpark() "restarts" the thread and
176 * calls the thread function again.
177 */
178 bool kthread_should_park(void)
179 {
180 return __kthread_should_park(current);
181 }
182 EXPORT_SYMBOL_GPL(kthread_should_park);
183
184 bool kthread_should_stop_or_park(void)
185 {
186 struct kthread *kthread = __to_kthread(current);
187
188 if (!kthread)
189 return false;
190
191 return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
192 }
193
194 /**
195 * kthread_freezable_should_stop - should this freezable kthread return now?
196 * @was_frozen: optional out parameter, indicates whether %current was frozen
197 *
198 * kthread_should_stop() for freezable kthreads, which will enter
199 * refrigerator if necessary. This function is safe from kthread_stop() /
200 * freezer deadlock and freezable kthreads should use this function instead
201 * of calling try_to_freeze() directly.
202 */
203 bool kthread_freezable_should_stop(bool *was_frozen)
204 {
205 bool frozen = false;
206
207 might_sleep();
208
209 if (unlikely(freezing(current)))
210 frozen = __refrigerator(true);
211
212 if (was_frozen)
213 *was_frozen = frozen;
214
215 return kthread_should_stop();
216 }
217 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
218
219 /**
220 * kthread_func - return the function specified on kthread creation
221 * @task: kthread task in question
222 *
223 * Returns NULL if the task is not a kthread.
224 */
225 void *kthread_func(struct task_struct *task)
226 {
227 struct kthread *kthread = __to_kthread(task);
228 if (kthread)
229 return kthread->threadfn;
230 return NULL;
231 }
232 EXPORT_SYMBOL_GPL(kthread_func);
233
234 /**
235 * kthread_data - return data value specified on kthread creation
236 * @task: kthread task in question
237 *
238 * Return the data value specified when kthread @task was created.
239 * The caller is responsible for ensuring the validity of @task when
240 * calling this function.
241 */
242 void *kthread_data(struct task_struct *task)
243 {
244 return to_kthread(task)->data;
245 }
246 EXPORT_SYMBOL_GPL(kthread_data);
247
248 /**
249 * kthread_probe_data - speculative version of kthread_data()
250 * @task: possible kthread task in question
251 *
252 * @task could be a kthread task. Return the data value specified when it
253 * was created if accessible. If @task isn't a kthread task or its data is
254 * inaccessible for any reason, %NULL is returned. This function requires
255 * that @task itself is safe to dereference.
256 */
257 void *kthread_probe_data(struct task_struct *task)
258 {
259 struct kthread *kthread = __to_kthread(task);
260 void *data = NULL;
261
262 if (kthread)
263 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
264 return data;
265 }
266
267 static void __kthread_parkme(struct kthread *self)
268 {
269 for (;;) {
270 /*
271 * TASK_PARKED is a special state; we must serialize against
272 * possible pending wakeups to avoid store-store collisions on
273 * task->state.
274 *
275 * Such a collision might possibly result in the task state
276 * changing from TASK_PARKED and us failing the
277 * wait_task_inactive() in kthread_park().
278 */
279 set_special_state(TASK_PARKED);
280 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
281 break;
282
283 /*
284 * Thread is going to call schedule(), do not preempt it,
285 * or the caller of kthread_park() may spend more time in
286 * wait_task_inactive().
287 */
288 preempt_disable();
289 complete(&self->parked);
290 schedule_preempt_disabled();
291 preempt_enable();
292 }
293 __set_current_state(TASK_RUNNING);
294 }
295
296 void kthread_parkme(void)
297 {
298 __kthread_parkme(to_kthread(current));
299 }
300 EXPORT_SYMBOL_GPL(kthread_parkme);
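
/*
 * Example (illustrative sketch; my_percpu_thread_fn() and do_unit_of_work()
 * are hypothetical): a thread function that cooperates with kthread_park()
 * by checking kthread_should_park() and parking itself from its main loop,
 * similar in spirit to the smpboot per-CPU threads.
 *
 *	static int my_percpu_thread_fn(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			do_unit_of_work(arg);
 *		}
 *		return 0;
 *	}
 */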
301
302 /**
303 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
304 * @result: The integer value to return to kthread_stop().
305 *
306 * While kthread_exit can be called directly, it exists so that
307 * functions which do some additional work in non-modular code such as
308 * module_put_and_kthread_exit can be implemented.
309 *
310 * Does not return.
311 */
312 void __noreturn kthread_exit(long result)
313 {
314 struct kthread *kthread = to_kthread(current);
315 kthread->result = result;
316 do_exit(0);
317 }
318
319 /**
320 * kthread_complete_and_exit - Exit the current kthread.
321 * @comp: Completion to complete
322 * @code: The integer value to return to kthread_stop().
323 *
324 * If present, complete @comp and then return @code to kthread_stop().
325 *
326 * A kernel thread whose module may be removed after the completion of
327 * @comp can use this function to exit safely.
328 *
329 * Does not return.
330 */
331 void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
332 {
333 if (comp)
334 complete(comp);
335
336 kthread_exit(code);
337 }
338 EXPORT_SYMBOL(kthread_complete_and_exit);
339
340 static int kthread(void *_create)
341 {
342 static const struct sched_param param = { .sched_priority = 0 };
343 /* Copy data: it's on kthread's stack */
344 struct kthread_create_info *create = _create;
345 int (*threadfn)(void *data) = create->threadfn;
346 void *data = create->data;
347 struct completion *done;
348 struct kthread *self;
349 int ret;
350
351 self = to_kthread(current);
352
353 /* Release the structure when caller killed by a fatal signal. */
354 done = xchg(&create->done, NULL);
355 if (!done) {
356 kfree(create);
357 kthread_exit(-EINTR);
358 }
359
360 self->threadfn = threadfn;
361 self->data = data;
362
363 /*
364 * The new thread inherited kthreadd's priority and CPU mask. Reset
365 * back to default in case they have been changed.
366 */
367 sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
368 set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
369
370 /* OK, tell user we're spawned, wait for stop or wakeup */
371 __set_current_state(TASK_UNINTERRUPTIBLE);
372 create->result = current;
373 /*
374 * Thread is going to call schedule(), do not preempt it,
375 * or the creator may spend more time in wait_task_inactive().
376 */
377 preempt_disable();
378 complete(done);
379 schedule_preempt_disabled();
380 preempt_enable();
381
382 ret = -EINTR;
383 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
384 cgroup_kthread_ready();
385 __kthread_parkme(self);
386 ret = threadfn(data);
387 }
388 kthread_exit(ret);
389 }
390
391 /* called from kernel_clone() to get node information for about to be created task */
392 int tsk_fork_get_node(struct task_struct *tsk)
393 {
394 #ifdef CONFIG_NUMA
395 if (tsk == kthreadd_task)
396 return tsk->pref_node_fork;
397 #endif
398 return NUMA_NO_NODE;
399 }
400
401 static void create_kthread(struct kthread_create_info *create)
402 {
403 int pid;
404
405 #ifdef CONFIG_NUMA
406 current->pref_node_fork = create->node;
407 #endif
408 /* We want our own signal handler (we take no signals by default). */
409 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
410 if (pid < 0) {
411 /* Release the structure when caller killed by a fatal signal. */
412 struct completion *done = xchg(&create->done, NULL);
413
414 if (!done) {
415 kfree(create);
416 return;
417 }
418 create->result = ERR_PTR(pid);
419 complete(done);
420 }
421 }
422
423 static __printf(4, 0)
424 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
425 void *data, int node,
426 const char namefmt[],
427 va_list args)
428 {
429 DECLARE_COMPLETION_ONSTACK(done);
430 struct task_struct *task;
431 struct kthread_create_info *create = kmalloc(sizeof(*create),
432 GFP_KERNEL);
433
434 if (!create)
435 return ERR_PTR(-ENOMEM);
436 create->threadfn = threadfn;
437 create->data = data;
438 create->node = node;
439 create->done = &done;
440
441 spin_lock(&kthread_create_lock);
442 list_add_tail(&create->list, &kthread_create_list);
443 spin_unlock(&kthread_create_lock);
444
445 wake_up_process(kthreadd_task);
446 /*
447 * Wait for completion in killable state, for I might be chosen by
448 * the OOM killer while kthreadd is trying to allocate memory for
449 * new kernel thread.
450 */
451 if (unlikely(wait_for_completion_killable(&done))) {
452 /*
453 * If I was killed by a fatal signal before kthreadd (or new
454 * kernel thread) calls complete(), leave the cleanup of this
455 * structure to that thread.
456 */
457 if (xchg(&create->done, NULL))
458 return ERR_PTR(-EINTR);
459 /*
460 * kthreadd (or new kernel thread) will call complete()
461 * shortly.
462 */
463 wait_for_completion(&done);
464 }
465 task = create->result;
466 if (!IS_ERR(task)) {
467 char name[TASK_COMM_LEN];
468 va_list aq;
469 int len;
470
471 /*
472 * task is already visible to other tasks, so updating
473 * COMM must be protected.
474 */
475 va_copy(aq, args);
476 len = vsnprintf(name, sizeof(name), namefmt, aq);
477 va_end(aq);
478 if (len >= TASK_COMM_LEN) {
479 struct kthread *kthread = to_kthread(task);
480
481 /* leave it truncated when out of memory. */
482 kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
483 }
484 set_task_comm(task, name);
485 }
486 kfree(create);
487 return task;
488 }
489
490 /**
491 * kthread_create_on_node - create a kthread.
492 * @threadfn: the function to run until signal_pending(current).
493 * @data: data ptr for @threadfn.
494 * @node: task and thread structures for the thread are allocated on this node
495 * @namefmt: printf-style name for the thread.
496 *
497 * Description: This helper function creates and names a kernel
498 * thread. The thread will be stopped: use wake_up_process() to start
499 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
500 * is affine to all CPUs.
501 *
502 * If thread is going to be bound on a particular cpu, give its node
503 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
504 * When woken, the thread will run @threadfn() with @data as its
505 * argument. @threadfn() can either return directly if it is a
506 * standalone thread for which no one will call kthread_stop(), or
507 * return when 'kthread_should_stop()' is true (which means
508 * kthread_stop() has been called). The return value should be zero
509 * or a negative error number; it will be passed to kthread_stop().
510 *
511 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
512 */
513 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
514 void *data, int node,
515 const char namefmt[],
516 ...)
517 {
518 struct task_struct *task;
519 va_list args;
520
521 va_start(args, namefmt);
522 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
523 va_end(args);
524
525 return task;
526 }
527 EXPORT_SYMBOL(kthread_create_on_node);
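
/*
 * Example (illustrative sketch; my_thread_fn() and my_dev are hypothetical):
 * creating, starting and later stopping a thread with this interface. The
 * thread is created sleeping, so it must be woken explicitly; kthread_run()
 * is a convenience wrapper that does the wake_up_process() for the caller,
 * and kthread_stop() returns my_thread_fn()'s result.
 *
 *	struct task_struct *tsk;
 *	int ret;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_dev, NUMA_NO_NODE,
 *				     "mydrv/%d", my_dev->id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */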
528
529 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
530 {
531 unsigned long flags;
532
533 if (!wait_task_inactive(p, state)) {
534 WARN_ON(1);
535 return;
536 }
537
538 /* It's safe because the task is inactive. */
539 raw_spin_lock_irqsave(&p->pi_lock, flags);
540 do_set_cpus_allowed(p, mask);
541 p->flags |= PF_NO_SETAFFINITY;
542 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
543 }
544
545 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
546 {
547 __kthread_bind_mask(p, cpumask_of(cpu), state);
548 }
549
550 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
551 {
552 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
553 }
554 EXPORT_SYMBOL_GPL(kthread_bind_mask);
555
556 /**
557 * kthread_bind - bind a just-created kthread to a cpu.
558 * @p: thread created by kthread_create().
559 * @cpu: cpu (might not be online, must be possible) for @k to run on.
560 *
561 * Description: This function is equivalent to set_cpus_allowed(),
562 * except that @cpu doesn't need to be online, and the thread must be
563 * stopped (i.e., just returned from kthread_create()).
564 */
565 void kthread_bind(struct task_struct *p, unsigned int cpu)
566 {
567 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
568 }
569 EXPORT_SYMBOL(kthread_bind);
570
571 /**
572 * kthread_create_on_cpu - Create a cpu bound kthread
573 * @threadfn: the function to run until signal_pending(current).
574 * @data: data ptr for @threadfn.
575 * @cpu: The cpu on which the thread should be bound,
576 * @namefmt: printf-style name for the thread. Format is restricted
577 * to "name.*%u". Code fills in cpu number.
578 *
579 * Description: This helper function creates and names a kernel thread
580 */
581 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
582 void *data, unsigned int cpu,
583 const char *namefmt)
584 {
585 struct task_struct *p;
586
587 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
588 cpu);
589 if (IS_ERR(p))
590 return p;
591 kthread_bind(p, cpu);
592 /* CPU hotplug needs to bind once again when unparking the thread. */
593 to_kthread(p)->cpu = cpu;
594 return p;
595 }
596 EXPORT_SYMBOL(kthread_create_on_cpu);
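
/*
 * Example (illustrative sketch; my_cpu_fn() is hypothetical): creating a
 * CPU-bound thread. The name format must contain "%u", which this helper
 * fills in with the CPU number.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_cpu(my_cpu_fn, NULL, cpu, "mydrv/%u");
 *	if (!IS_ERR(tsk))
 *		wake_up_process(tsk);
 */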
597
598 void kthread_set_per_cpu(struct task_struct *k, int cpu)
599 {
600 struct kthread *kthread = to_kthread(k);
601 if (!kthread)
602 return;
603
604 WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
605
606 if (cpu < 0) {
607 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
608 return;
609 }
610
611 kthread->cpu = cpu;
612 set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
613 }
614 EXPORT_SYMBOL_GPL(kthread_set_per_cpu);
615
616 bool kthread_is_per_cpu(struct task_struct *p)
617 {
618 struct kthread *kthread = __to_kthread(p);
619 if (!kthread)
620 return false;
621
622 return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
623 }
624
625 /**
626 * kthread_unpark - unpark a thread created by kthread_create().
627 * @k: thread created by kthread_create().
628 *
629 * Sets kthread_should_park() for @k to return false, wakes it, and
630 * waits for it to return. If the thread is marked percpu then it is
631 * bound to the cpu again.
632 */
633 void kthread_unpark(struct task_struct *k)
634 {
635 struct kthread *kthread = to_kthread(k);
636
637 /*
638 * Newly created kthread was parked when the CPU was offline.
639 * The binding was lost and we need to set it again.
640 */
641 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
642 __kthread_bind(k, kthread->cpu, TASK_PARKED);
643
644 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
645 /*
646 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
647 */
648 wake_up_state(k, TASK_PARKED);
649 }
650 EXPORT_SYMBOL_GPL(kthread_unpark);
651
652 /**
653 * kthread_park - park a thread created by kthread_create().
654 * @k: thread created by kthread_create().
655 *
656 * Sets kthread_should_park() for @k to return true, wakes it, and
657 * waits for it to return. This can also be called after kthread_create()
658 * instead of calling wake_up_process(): the thread will park without
659 * calling threadfn().
660 *
661 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
662 * If called by the kthread itself just the park bit is set.
663 */
664 int kthread_park(struct task_struct *k)
665 {
666 struct kthread *kthread = to_kthread(k);
667
668 if (WARN_ON(k->flags & PF_EXITING))
669 return -ENOSYS;
670
671 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
672 return -EBUSY;
673
674 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
675 if (k != current) {
676 wake_up_process(k);
677 /*
678 * Wait for __kthread_parkme() to complete(), this means we
679 * _will_ have TASK_PARKED and are about to call schedule().
680 */
681 wait_for_completion(&kthread->parked);
682 /*
683 * Now wait for that schedule() to complete and the task to
684 * get scheduled out.
685 */
686 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
687 }
688
689 return 0;
690 }
691 EXPORT_SYMBOL_GPL(kthread_park);
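
/*
 * Example (illustrative sketch; reconfigure_shared_state() is hypothetical
 * and tsk is assumed to be a thread whose threadfn calls kthread_parkme()):
 * parking a thread around a reconfiguration so it is known to be quiescent,
 * then letting it run again.
 *
 *	int err;
 *
 *	err = kthread_park(tsk);
 *	if (err)
 *		return err;
 *	reconfigure_shared_state();
 *	kthread_unpark(tsk);
 */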
692
693 /**
694 * kthread_stop - stop a thread created by kthread_create().
695 * @k: thread created by kthread_create().
696 *
697 * Sets kthread_should_stop() for @k to return true, wakes it, and
698 * waits for it to exit. This can also be called after kthread_create()
699 * instead of calling wake_up_process(): the thread will exit without
700 * calling threadfn().
701 *
702 * If threadfn() may call kthread_exit() itself, the caller must ensure
703 * task_struct can't go away.
704 *
705 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
706 * was never called.
707 */
708 int kthread_stop(struct task_struct *k)
709 {
710 struct kthread *kthread;
711 int ret;
712
713 trace_sched_kthread_stop(k);
714
715 get_task_struct(k);
716 kthread = to_kthread(k);
717 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
718 kthread_unpark(k);
719 set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
720 wake_up_process(k);
721 wait_for_completion(&kthread->exited);
722 ret = kthread->result;
723 put_task_struct(k);
724
725 trace_sched_kthread_stop_ret(ret);
726 return ret;
727 }
728 EXPORT_SYMBOL(kthread_stop);
729
730 int kthreadd(void *unused)
731 {
732 struct task_struct *tsk = current;
733
734 /* Setup a clean context for our children to inherit. */
735 set_task_comm(tsk, "kthreadd");
736 ignore_signals(tsk);
737 set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
738 set_mems_allowed(node_states[N_MEMORY]);
739
740 current->flags |= PF_NOFREEZE;
741 cgroup_init_kthreadd();
742
743 for (;;) {
744 set_current_state(TASK_INTERRUPTIBLE);
745 if (list_empty(&kthread_create_list))
746 schedule();
747 __set_current_state(TASK_RUNNING);
748
749 spin_lock(&kthread_create_lock);
750 while (!list_empty(&kthread_create_list)) {
751 struct kthread_create_info *create;
752
753 create = list_entry(kthread_create_list.next,
754 struct kthread_create_info, list);
755 list_del_init(&create->list);
756 spin_unlock(&kthread_create_lock);
757
758 create_kthread(create);
759
760 spin_lock(&kthread_create_lock);
761 }
762 spin_unlock(&kthread_create_lock);
763 }
764
765 return 0;
766 }
767
768 void __kthread_init_worker(struct kthread_worker *worker,
769 const char *name,
770 struct lock_class_key *key)
771 {
772 memset(worker, 0, sizeof(struct kthread_worker));
773 raw_spin_lock_init(&worker->lock);
774 lockdep_set_class_and_name(&worker->lock, key, name);
775 INIT_LIST_HEAD(&worker->work_list);
776 INIT_LIST_HEAD(&worker->delayed_work_list);
777 }
778 EXPORT_SYMBOL_GPL(__kthread_init_worker);
779
780 /**
781 * kthread_worker_fn - kthread function to process kthread_worker
782 * @worker_ptr: pointer to initialized kthread_worker
783 *
784 * This function implements the main cycle of kthread worker. It processes
785 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
786 * is empty.
787 *
788 * The works are not allowed to hold any locks or leave preemption or interrupts
789 * disabled when they finish. A safe point for freezing is defined when one work
790 * finishes and before a new one is started.
791 *
792 * Also the works must not be handled by more than one worker at the same time,
793 * see also kthread_queue_work().
794 */
795 int kthread_worker_fn(void *worker_ptr)
796 {
797 struct kthread_worker *worker = worker_ptr;
798 struct kthread_work *work;
799
800 /*
801 * FIXME: Update the check and remove the assignment when all kthread
802 * worker users are created using kthread_create_worker*() functions.
803 */
804 WARN_ON(worker->task && worker->task != current);
805 worker->task = current;
806
807 if (worker->flags & KTW_FREEZABLE)
808 set_freezable();
809
810 repeat:
811 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
812
813 if (kthread_should_stop()) {
814 __set_current_state(TASK_RUNNING);
815 raw_spin_lock_irq(&worker->lock);
816 worker->task = NULL;
817 raw_spin_unlock_irq(&worker->lock);
818 return 0;
819 }
820
821 work = NULL;
822 raw_spin_lock_irq(&worker->lock);
823 if (!list_empty(&worker->work_list)) {
824 work = list_first_entry(&worker->work_list,
825 struct kthread_work, node);
826 list_del_init(&work->node);
827 }
828 worker->current_work = work;
829 raw_spin_unlock_irq(&worker->lock);
830
831 if (work) {
832 kthread_work_func_t func = work->func;
833 __set_current_state(TASK_RUNNING);
834 trace_sched_kthread_work_execute_start(work);
835 work->func(work);
836 /*
837 * Avoid dereferencing work after this point. The trace
838 * event only cares about the address.
839 */
840 trace_sched_kthread_work_execute_end(work, func);
841 } else if (!freezing(current))
842 schedule();
843
844 try_to_freeze();
845 cond_resched();
846 goto repeat;
847 }
848 EXPORT_SYMBOL_GPL(kthread_worker_fn);
849
850 static __printf(3, 0) struct kthread_worker *
851 __kthread_create_worker(int cpu, unsigned int flags,
852 const char namefmt[], va_list args)
853 {
854 struct kthread_worker *worker;
855 struct task_struct *task;
856 int node = NUMA_NO_NODE;
857
858 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
859 if (!worker)
860 return ERR_PTR(-ENOMEM);
861
862 kthread_init_worker(worker);
863
864 if (cpu >= 0)
865 node = cpu_to_node(cpu);
866
867 task = __kthread_create_on_node(kthread_worker_fn, worker,
868 node, namefmt, args);
869 if (IS_ERR(task))
870 goto fail_task;
871
872 if (cpu >= 0)
873 kthread_bind(task, cpu);
874
875 worker->flags = flags;
876 worker->task = task;
877 wake_up_process(task);
878 return worker;
879
880 fail_task:
881 kfree(worker);
882 return ERR_CAST(task);
883 }
884
885 /**
886 * kthread_create_worker - create a kthread worker
887 * @flags: flags modifying the default behavior of the worker
888 * @namefmt: printf-style name for the kthread worker (task).
889 *
890 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
891 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
892 * when the caller was killed by a fatal signal.
893 */
894 struct kthread_worker *
895 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
896 {
897 struct kthread_worker *worker;
898 va_list args;
899
900 va_start(args, namefmt);
901 worker = __kthread_create_worker(-1, flags, namefmt, args);
902 va_end(args);
903
904 return worker;
905 }
906 EXPORT_SYMBOL(kthread_create_worker);
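
/*
 * Example (illustrative sketch; my_work_fn(), struct my_item, item, worker
 * and process_item() are hypothetical): creating a dedicated worker, queuing
 * a kthread_work on it and tearing the worker down again.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_item *item = container_of(work, struct my_item, work);
 *
 *		process_item(item);
 *	}
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&item->work, my_work_fn);
 *	kthread_queue_work(worker, &item->work);
 *	...
 *	kthread_destroy_worker(worker);
 */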
907
908 /**
909 * kthread_create_worker_on_cpu - create a kthread worker and bind it
910 * to a given CPU and the associated NUMA node.
911 * @cpu: CPU number
912 * @flags: flags modifying the default behavior of the worker
913 * @namefmt: printf-style name for the kthread worker (task).
914 *
915 * Use a valid CPU number if you want to bind the kthread worker
916 * to the given CPU and the associated NUMA node.
917 *
918 * A good practice is to add the cpu number also into the worker name.
919 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
920 *
921 * CPU hotplug:
922 * The kthread worker API is simple and generic. It just provides a way
923 * to create, use, and destroy workers.
924 *
925 * It is up to the API user how to handle CPU hotplug. They have to decide
926 * how to handle pending work items, prevent queuing new ones, and
927 * restore the functionality when the CPU goes off and on. There are a
928 * few catches:
929 *
930 * - CPU affinity gets lost when it is scheduled on an offline CPU.
931 *
932 * - The worker might not exist when the CPU was off when the user
933 * created the workers.
934 *
935 * Good practice is to implement two CPU hotplug callbacks and to
936 * destroy/create the worker when the CPU goes down/up.
937 *
938 * Return:
939 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
940 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
941 * when the caller was killed by a fatal signal.
942 */
943 struct kthread_worker *
944 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
945 const char namefmt[], ...)
946 {
947 struct kthread_worker *worker;
948 va_list args;
949
950 va_start(args, namefmt);
951 worker = __kthread_create_worker(cpu, flags, namefmt, args);
952 va_end(args);
953
954 return worker;
955 }
956 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
957
958 /*
959 * Returns true when the work could not be queued at the moment.
960 * It happens when it is already pending in a worker list
961 * or when it is being cancelled.
962 */
963 static inline bool queuing_blocked(struct kthread_worker *worker,
964 struct kthread_work *work)
965 {
966 lockdep_assert_held(&worker->lock);
967
968 return !list_empty(&work->node) || work->canceling;
969 }
970
971 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
972 struct kthread_work *work)
973 {
974 lockdep_assert_held(&worker->lock);
975 WARN_ON_ONCE(!list_empty(&work->node));
976 /* Do not use a work with >1 worker, see kthread_queue_work() */
977 WARN_ON_ONCE(work->worker && work->worker != worker);
978 }
979
980 /* insert @work before @pos in @worker */
981 static void kthread_insert_work(struct kthread_worker *worker,
982 struct kthread_work *work,
983 struct list_head *pos)
984 {
985 kthread_insert_work_sanity_check(worker, work);
986
987 trace_sched_kthread_work_queue_work(worker, work);
988
989 list_add_tail(&work->node, pos);
990 work->worker = worker;
991 if (!worker->current_work && likely(worker->task))
992 wake_up_process(worker->task);
993 }
994
995 /**
996 * kthread_queue_work - queue a kthread_work
997 * @worker: target kthread_worker
998 * @work: kthread_work to queue
999 *
1000 * Queue @work for async execution on @worker. The worker must have
1001 * been created with kthread_create_worker(). Returns %true
1002 * if @work was successfully queued, %false if it was already pending.
1003 *
1004 * Reinitialize the work if it needs to be used by another worker.
1005 * For example, when the worker was stopped and started again.
1006 */
1007 bool kthread_queue_work(struct kthread_worker *worker,
1008 struct kthread_work *work)
1009 {
1010 bool ret = false;
1011 unsigned long flags;
1012
1013 raw_spin_lock_irqsave(&worker->lock, flags);
1014 if (!queuing_blocked(worker, work)) {
1015 kthread_insert_work(worker, work, &worker->work_list);
1016 ret = true;
1017 }
1018 raw_spin_unlock_irqrestore(&worker->lock, flags);
1019 return ret;
1020 }
1021 EXPORT_SYMBOL_GPL(kthread_queue_work);
1022
1023 /**
1024 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1025 * delayed work when the timer expires.
1026 * @t: pointer to the expired timer
1027 *
1028 * The format of the function is defined by struct timer_list.
1029 * It should have been called from irqsafe timer with irq already off.
1030 */
1031 void kthread_delayed_work_timer_fn(struct timer_list *t)
1032 {
1033 struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1034 struct kthread_work *work = &dwork->work;
1035 struct kthread_worker *worker = work->worker;
1036 unsigned long flags;
1037
1038 /*
1039 * This might happen when a pending work is reinitialized.
1040 * It means that it is being used in a wrong way.
1041 */
1042 if (WARN_ON_ONCE(!worker))
1043 return;
1044
1045 raw_spin_lock_irqsave(&worker->lock, flags);
1046 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1047 WARN_ON_ONCE(work->worker != worker);
1048
1049 /* Move the work from worker->delayed_work_list. */
1050 WARN_ON_ONCE(list_empty(&work->node));
1051 list_del_init(&work->node);
1052 if (!work->canceling)
1053 kthread_insert_work(worker, work, &worker->work_list);
1054
1055 raw_spin_unlock_irqrestore(&worker->lock, flags);
1056 }
1057 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1058
1059 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1060 struct kthread_delayed_work *dwork,
1061 unsigned long delay)
1062 {
1063 struct timer_list *timer = &dwork->timer;
1064 struct kthread_work *work = &dwork->work;
1065
1066 WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1067
1068 /*
1069 * If @delay is 0, queue @dwork->work immediately. This is for
1070 * both optimization and correctness. The earliest @timer can
1071 * expire is on the closest next tick and delayed_work users depend
1072 * on that there's no such delay when @delay is 0.
1073 */
1074 if (!delay) {
1075 kthread_insert_work(worker, work, &worker->work_list);
1076 return;
1077 }
1078
1079 /* Be paranoid and try to detect possible races already now. */
1080 kthread_insert_work_sanity_check(worker, work);
1081
1082 list_add(&work->node, &worker->delayed_work_list);
1083 work->worker = worker;
1084 timer->expires = jiffies + delay;
1085 add_timer(timer);
1086 }
1087
1088 /**
1089 * kthread_queue_delayed_work - queue the associated kthread work
1090 * after a delay.
1091 * @worker: target kthread_worker
1092 * @dwork: kthread_delayed_work to queue
1093 * @delay: number of jiffies to wait before queuing
1094 *
1095 * If the work has not been pending it starts a timer that will queue
1096 * the work after the given @delay. If @delay is zero, it queues the
1097 * work immediately.
1098 *
1099 * Return: %false if the @work has already been pending. It means that
1100 * either the timer was running or the work was queued. It returns %true
1101 * otherwise.
1102 */
1103 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1104 struct kthread_delayed_work *dwork,
1105 unsigned long delay)
1106 {
1107 struct kthread_work *work = &dwork->work;
1108 unsigned long flags;
1109 bool ret = false;
1110
1111 raw_spin_lock_irqsave(&worker->lock, flags);
1112
1113 if (!queuing_blocked(worker, work)) {
1114 __kthread_queue_delayed_work(worker, dwork, delay);
1115 ret = true;
1116 }
1117
1118 raw_spin_unlock_irqrestore(&worker->lock, flags);
1119 return ret;
1120 }
1121 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
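
/*
 * Example (illustrative sketch; my_timeout_fn(), my_dwork and worker are
 * hypothetical): arming a delayed work on a worker and cancelling it
 * synchronously during teardown.
 *
 *	kthread_init_delayed_work(&my_dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(500));
 *	...
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 */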
1122
1123 struct kthread_flush_work {
1124 struct kthread_work work;
1125 struct completion done;
1126 };
1127
1128 static void kthread_flush_work_fn(struct kthread_work *work)
1129 {
1130 struct kthread_flush_work *fwork =
1131 container_of(work, struct kthread_flush_work, work);
1132 complete(&fwork->done);
1133 }
1134
1135 /**
1136 * kthread_flush_work - flush a kthread_work
1137 * @work: work to flush
1138 *
1139 * If @work is queued or executing, wait for it to finish execution.
1140 */
1141 void kthread_flush_work(struct kthread_work *work)
1142 {
1143 struct kthread_flush_work fwork = {
1144 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1145 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1146 };
1147 struct kthread_worker *worker;
1148 bool noop = false;
1149
1150 worker = work->worker;
1151 if (!worker)
1152 return;
1153
1154 raw_spin_lock_irq(&worker->lock);
1155 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1156 WARN_ON_ONCE(work->worker != worker);
1157
1158 if (!list_empty(&work->node))
1159 kthread_insert_work(worker, &fwork.work, work->node.next);
1160 else if (worker->current_work == work)
1161 kthread_insert_work(worker, &fwork.work,
1162 worker->work_list.next);
1163 else
1164 noop = true;
1165
1166 raw_spin_unlock_irq(&worker->lock);
1167
1168 if (!noop)
1169 wait_for_completion(&fwork.done);
1170 }
1171 EXPORT_SYMBOL_GPL(kthread_flush_work);
1172
1173 /*
1174 * Make sure that the timer is neither set nor running and could
1175 * not manipulate the work list_head any longer.
1176 *
1177 * The function is called under worker->lock. The lock is temporarily
1178 * released but the timer can't be set again in the meantime.
1179 */
1180 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1181 unsigned long *flags)
1182 {
1183 struct kthread_delayed_work *dwork =
1184 container_of(work, struct kthread_delayed_work, work);
1185 struct kthread_worker *worker = work->worker;
1186
1187 /*
1188 * del_timer_sync() must be called to make sure that the timer
1189 * callback is not running. The lock must be temporarily released
1190 * to avoid a deadlock with the callback. In the meantime,
1191 * any queuing is blocked by setting the canceling counter.
1192 */
1193 work->canceling++;
1194 raw_spin_unlock_irqrestore(&worker->lock, *flags);
1195 del_timer_sync(&dwork->timer);
1196 raw_spin_lock_irqsave(&worker->lock, *flags);
1197 work->canceling--;
1198 }
1199
1200 /*
1201 * This function removes the work from the worker queue.
1202 *
1203 * It is called under worker->lock. The caller must make sure that
1204 * the timer used by delayed work is not running, e.g. by calling
1205 * kthread_cancel_delayed_work_timer().
1206 *
1207 * The work might still be in use when this function finishes. See the
1208 * current_work entry being processed by the worker.
1209 *
1210 * Return: %true if @work was pending and successfully canceled,
1211 * %false if @work was not pending
1212 */
1213 static bool __kthread_cancel_work(struct kthread_work *work)
1214 {
1215 /*
1216 * Try to remove the work from a worker list. It might either
1217 * be from worker->work_list or from worker->delayed_work_list.
1218 */
1219 if (!list_empty(&work->node)) {
1220 list_del_init(&work->node);
1221 return true;
1222 }
1223
1224 return false;
1225 }
1226
1227 /**
1228 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1229 * @worker: kthread worker to use
1230 * @dwork: kthread delayed work to queue
1231 * @delay: number of jiffies to wait before queuing
1232 *
1233 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1234 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1235 * @work is guaranteed to be queued immediately.
1236 *
1237 * Return: %false if @dwork was idle and queued, %true otherwise.
1238 *
1239 * A special case is when the work is being canceled in parallel.
1240 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1241 * or yet another kthread_mod_delayed_work() call. We let the other command
1242 * win and return %true here. The return value can be used for reference
1243 * counting and the number of queued works stays the same. Anyway, the caller
1244 * is supposed to synchronize these operations a reasonable way.
1245 *
1246 * This function is safe to call from any context including IRQ handler.
1247 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1248 * for details.
1249 */
1250 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1251 struct kthread_delayed_work *dwork,
1252 unsigned long delay)
1253 {
1254 struct kthread_work *work = &dwork->work;
1255 unsigned long flags;
1256 int ret;
1257
1258 raw_spin_lock_irqsave(&worker->lock, flags);
1259
1260 /* Do not bother with canceling when never queued. */
1261 if (!work->worker) {
1262 ret = false;
1263 goto fast_queue;
1264 }
1265
1266 /* Work must not be used with >1 worker, see kthread_queue_work() */
1267 WARN_ON_ONCE(work->worker != worker);
1268
1269 /*
1270 * Temporarily cancel the work but do not fight with another command
1271 * that is canceling the work as well.
1272 *
1273 * It is a bit tricky because of possible races with another
1274 * mod_delayed_work() and cancel_delayed_work() callers.
1275 *
1276 * The timer must be canceled first because worker->lock is released
1277 * when doing so. But the work can be removed from the queue (list)
1278 * only when it can be queued again so that the return value can
1279 * be used for reference counting.
1280 */
1281 kthread_cancel_delayed_work_timer(work, &flags);
1282 if (work->canceling) {
1283 /* The number of works in the queue does not change. */
1284 ret = true;
1285 goto out;
1286 }
1287 ret = __kthread_cancel_work(work);
1288
1289 fast_queue:
1290 __kthread_queue_delayed_work(worker, dwork, delay);
1291 out:
1292 raw_spin_unlock_irqrestore(&worker->lock, flags);
1293 return ret;
1294 }
1295 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1296
1297 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1298 {
1299 struct kthread_worker *worker = work->worker;
1300 unsigned long flags;
1301 int ret = false;
1302
1303 if (!worker)
1304 goto out;
1305
1306 raw_spin_lock_irqsave(&worker->lock, flags);
1307 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1308 WARN_ON_ONCE(work->worker != worker);
1309
1310 if (is_dwork)
1311 kthread_cancel_delayed_work_timer(work, &flags);
1312
1313 ret = __kthread_cancel_work(work);
1314
1315 if (worker->current_work != work)
1316 goto out_fast;
1317
1318 /*
1319 * The work is in progress and we need to wait with the lock released.
1320 * In the meantime, block any queuing by setting the canceling counter.
1321 */
1322 work->canceling++;
1323 raw_spin_unlock_irqrestore(&worker->lock, flags);
1324 kthread_flush_work(work);
1325 raw_spin_lock_irqsave(&worker->lock, flags);
1326 work->canceling--;
1327
1328 out_fast:
1329 raw_spin_unlock_irqrestore(&worker->lock, flags);
1330 out:
1331 return ret;
1332 }
1333
1334 /**
1335 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1336 * @work: the kthread work to cancel
1337 *
1338 * Cancel @work and wait for its execution to finish. This function
1339 * can be used even if the work re-queues itself. On return from this
1340 * function, @work is guaranteed to be not pending or executing on any CPU.
1341 *
1342 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1343 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1344 *
1345 * The caller must ensure that the worker on which @work was last
1346 * queued can't be destroyed before this function returns.
1347 *
1348 * Return: %true if @work was pending, %false otherwise.
1349 */
1350 bool kthread_cancel_work_sync(struct kthread_work *work)
1351 {
1352 return __kthread_cancel_work_sync(work, false);
1353 }
1354 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1355
1356 /**
1357 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1358 * wait for it to finish.
1359 * @dwork: the kthread delayed work to cancel
1360 *
1361 * This is kthread_cancel_work_sync() for delayed works.
1362 *
1363 * Return: %true if @dwork was pending, %false otherwise.
1364 */
1365 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1366 {
1367 return __kthread_cancel_work_sync(&dwork->work, true);
1368 }
1369 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1370
1371 /**
1372 * kthread_flush_worker - flush all current works on a kthread_worker
1373 * @worker: worker to flush
1374 *
1375 * Wait until all currently executing or pending works on @worker are
1376 * finished.
1377 */
1378 void kthread_flush_worker(struct kthread_worker *worker)
1379 {
1380 struct kthread_flush_work fwork = {
1381 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1382 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1383 };
1384
1385 kthread_queue_work(worker, &fwork.work);
1386 wait_for_completion(&fwork.done);
1387 }
1388 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1389
1390 /**
1391 * kthread_destroy_worker - destroy a kthread worker
1392 * @worker: worker to be destroyed
1393 *
1394 * Flush and destroy @worker. The simple flush is enough because the kthread
1395 * worker API is used only in trivial scenarios. There are no multi-step state
1396 * machines needed.
1397 */
1398 void kthread_destroy_worker(struct kthread_worker *worker)
1399 {
1400 struct task_struct *task;
1401
1402 task = worker->task;
1403 if (WARN_ON(!task))
1404 return;
1405
1406 kthread_flush_worker(worker);
1407 kthread_stop(task);
1408 WARN_ON(!list_empty(&worker->work_list));
1409 kfree(worker);
1410 }
1411 EXPORT_SYMBOL(kthread_destroy_worker);
1412
1413 /**
1414 * kthread_use_mm - make the calling kthread operate on an address space
1415 * @mm: address space to operate on
1416 */
1417 void kthread_use_mm(struct mm_struct *mm)
1418 {
1419 struct mm_struct *active_mm;
1420 struct task_struct *tsk = current;
1421
1422 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1423 WARN_ON_ONCE(tsk->mm);
1424
1425 task_lock(tsk);
1426 /* Hold off tlb flush IPIs while switching mm's */
1427 local_irq_disable();
1428 active_mm = tsk->active_mm;
1429 if (active_mm != mm) {
1430 mmgrab(mm);
1431 tsk->active_mm = mm;
1432 }
1433 tsk->mm = mm;
1434 membarrier_update_current_mm(mm);
1435 switch_mm_irqs_off(active_mm, mm, tsk);
1436 local_irq_enable();
1437 task_unlock(tsk);
1438 #ifdef finish_arch_post_lock_switch
1439 finish_arch_post_lock_switch();
1440 #endif
1441
1442 /*
1443 * When a kthread starts operating on an address space, the loop
1444 * in membarrier_{private,global}_expedited() may not observe
1445 * that tsk->mm, and not issue an IPI. Membarrier requires a
1446 * memory barrier after storing to tsk->mm, before accessing
1447 * user-space memory. A full memory barrier for membarrier
1448 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1449 * mmdrop(), or explicitly with smp_mb().
1450 */
1451 if (active_mm != mm)
1452 mmdrop(active_mm);
1453 else
1454 smp_mb();
1455 }
1456 EXPORT_SYMBOL_GPL(kthread_use_mm);
1457
1458 /**
1459 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1460 * @mm: address space to operate on
1461 */
1462 void kthread_unuse_mm(struct mm_struct *mm)
1463 {
1464 struct task_struct *tsk = current;
1465
1466 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1467 WARN_ON_ONCE(!tsk->mm);
1468
1469 task_lock(tsk);
1470 /*
1471 * When a kthread stops operating on an address space, the loop
1472 * in membarrier_{private,global}_expedited() may not observe
1473 * that tsk->mm, and not issue an IPI. Membarrier requires a
1474 * memory barrier after accessing user-space memory, before
1475 * clearing tsk->mm.
1476 */
1477 smp_mb__after_spinlock();
1478 sync_mm_rss(mm);
1479 local_irq_disable();
1480 tsk->mm = NULL;
1481 membarrier_update_current_mm(NULL);
1482 /* active_mm is still 'mm' */
1483 enter_lazy_tlb(mm, tsk);
1484 local_irq_enable();
1485 task_unlock(tsk);
1486 }
1487 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
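
/*
 * Example (illustrative sketch; mm is assumed to be a struct mm_struct the
 * caller holds a reference on, and buf/user_ptr/len are hypothetical): a
 * kthread temporarily adopting a user address space so that the usual
 * uaccess helpers operate on it, then dropping it again.
 *
 *	kthread_use_mm(mm);
 *	if (copy_from_user(buf, user_ptr, len))
 *		ret = -EFAULT;
 *	kthread_unuse_mm(mm);
 */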
1488
1489 #ifdef CONFIG_BLK_CGROUP
1490 /**
1491 * kthread_associate_blkcg - associate blkcg to current kthread
1492 * @css: the cgroup info
1493 *
1494 * Current thread must be a kthread. The thread is running jobs on behalf of
1495 * other threads. In some cases, we expect the jobs to attach the cgroup info of
1496 * the original threads instead of that of the current thread. This function stores
1497 * original thread's cgroup info in current kthread context for later
1498 * retrieval.
1499 */
1500 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1501 {
1502 struct kthread *kthread;
1503
1504 if (!(current->flags & PF_KTHREAD))
1505 return;
1506 kthread = to_kthread(current);
1507 if (!kthread)
1508 return;
1509
1510 if (kthread->blkcg_css) {
1511 css_put(kthread->blkcg_css);
1512 kthread->blkcg_css = NULL;
1513 }
1514 if (css) {
1515 css_get(css);
1516 kthread->blkcg_css = css;
1517 }
1518 }
1519 EXPORT_SYMBOL(kthread_associate_blkcg);
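
/*
 * Example (illustrative sketch; bio and css are assumed to come from the
 * originating context): a kthread doing I/O on behalf of another task can
 * temporarily adopt that task's blkcg so the I/O is attributed to it.
 *
 *	kthread_associate_blkcg(css);
 *	submit_bio(bio);
 *	kthread_associate_blkcg(NULL);
 */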
1520
1521 /**
1522 * kthread_blkcg - get associated blkcg css of current kthread
1523 *
1524 * Current thread must be a kthread.
1525 */
1526 struct cgroup_subsys_state *kthread_blkcg(void)
1527 {
1528 struct kthread *kthread;
1529
1530 if (current->flags & PF_KTHREAD) {
1531 kthread = to_kthread(current);
1532 if (kthread)
1533 return kthread->blkcg_css;
1534 }
1535 return NULL;
1536 }
1537 #endif
1538