/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
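
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * thread function checks kthread_should_stop() in its main loop; the value
 * it returns is handed back to whoever calls kthread_stop(). my_threadfn
 * and the per-iteration work are hypothetical.
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_one_unit_of_work(data);	// hypothetical helper
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	// becomes kthread_stop()'s return value
 *	}
 */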

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
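
/*
 * Usage sketch (illustrative, not from the original source): a loop that
 * honours both stop and park requests, as a smpboot-style per-cpu thread
 * would. my_percpu_fn and do_cpu_work() are hypothetical.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_cpu_work(data);	// hypothetical
 *		}
 *		return 0;
 *	}
 */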

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
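
/*
 * Usage sketch (illustrative assumption, not part of this file): a freezable
 * kthread marks itself with set_freezable() and then uses this helper as its
 * loop condition. do_io() is hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				pr_debug("thawed after suspend\n");
 *			do_io(data);	// hypothetical
 *		}
 *		return 0;
 *	}
 */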

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
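
/*
 * Usage sketch (illustrative): from inside the thread itself, the creation
 * data can be recovered via current. struct my_ctx is hypothetical.
 *
 *	struct my_ctx *ctx = kthread_data(current);
 */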

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
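
/*
 * Usage sketch (illustrative, not from this file): create a thread whose
 * stack is allocated NUMA-close to @cpu, then start it. my_threadfn and
 * my_dev are hypothetical.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(my_threadfn, my_dev,
 *				   cpu_to_node(cpu), "mydrv/%u", cpu);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);	// the thread starts stopped until woken
 */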

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
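
/*
 * Usage sketch (illustrative): bind while the thread is still stopped, i.e.
 * between kthread_create() and the first wake_up_process(). Names are
 * hypothetical.
 *
 *	t = kthread_create(my_threadfn, NULL, "mydrv/%u", cpu);
 *	if (!IS_ERR(t)) {
 *		kthread_bind(t, cpu);
 *		wake_up_process(t);
 *	}
 */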

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}
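
/*
 * Usage sketch (illustrative): the thread comes back parked, so the caller
 * unparks it once the CPU is ready. Names are hypothetical.
 *
 *	t = kthread_create_on_cpu(my_threadfn, NULL, cpu, "mydrv/%u");
 *	if (!IS_ERR(t))
 *		kthread_unpark(t);	// begins running my_threadfn on @cpu
 */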

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it.
 * If the thread is marked percpu then it is bound to the cpu again
 * before being woken.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}
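
/*
 * Usage sketch (illustrative): park a thread across a window where it must
 * not run (e.g. while its CPU goes offline), then let it continue.
 *
 *	if (kthread_park(t) == 0) {
 *		// the thread now sits in TASK_PARKED until unparked
 *		kthread_unpark(t);
 *	}
 */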

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
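
/*
 * Usage sketch (illustrative): orderly shutdown from the creator's context.
 * t is the task_struct created earlier (hypothetical); the -EINTR case
 * means the thread was never woken at all.
 *
 *	int ret = kthread_stop(t);
 *	if (ret && ret != -EINTR)
 *		pr_warn("worker exited with %d\n", ret);
 */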

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
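
/*
 * Usage sketch (illustrative, not from this file): dedicate a thread to a
 * worker by running this function via kthread_run(). my_worker is
 * hypothetical; DEFINE_KTHREAD_WORKER comes from <linux/kthread.h>.
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *t;
 *
 *	t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 */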

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution. @worker
 * must have been initialized with init_kthread_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
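
/*
 * Usage sketch (illustrative): initialize a work item once, then queue it.
 * my_work_fn is hypothetical and runs in the worker thread's context;
 * my_worker is the worker from the sketch above.
 *
 *	struct kthread_work my_work;
 *
 *	init_kthread_work(&my_work, my_work_fn);
 *	if (!queue_kthread_work(&my_worker, &my_work))
 *		pr_debug("work was already pending\n");
 */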

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
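
/*
 * Usage sketch (illustrative): make sure a work item is idle before tearing
 * down the structure that embeds it. my_obj is hypothetical.
 *
 *	flush_kthread_work(&my_obj->work);	// waits if queued or running
 *	kfree(my_obj);	// safe provided nothing can requeue the work
 */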

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
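
/*
 * Usage sketch (illustrative): drain everything queued so far, then stop the
 * worker thread. t and my_worker are the hypothetical names from the
 * kthread_worker_fn() sketch above.
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(t);
 */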