1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/kernel/exit.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8 #include <linux/mm.h>
9 #include <linux/slab.h>
10 #include <linux/sched/autogroup.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/stat.h>
13 #include <linux/sched/task.h>
14 #include <linux/sched/task_stack.h>
15 #include <linux/sched/cputime.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/capability.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/tty.h>
22 #include <linux/iocontext.h>
23 #include <linux/key.h>
24 #include <linux/cpu.h>
25 #include <linux/acct.h>
26 #include <linux/tsacct_kern.h>
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/freezer.h>
30 #include <linux/binfmts.h>
31 #include <linux/nsproxy.h>
32 #include <linux/pid_namespace.h>
33 #include <linux/ptrace.h>
34 #include <linux/profile.h>
35 #include <linux/mount.h>
36 #include <linux/proc_fs.h>
37 #include <linux/kthread.h>
38 #include <linux/mempolicy.h>
39 #include <linux/taskstats_kern.h>
40 #include <linux/delayacct.h>
41 #include <linux/cgroup.h>
42 #include <linux/syscalls.h>
43 #include <linux/signal.h>
44 #include <linux/posix-timers.h>
45 #include <linux/cn_proc.h>
46 #include <linux/mutex.h>
47 #include <linux/futex.h>
48 #include <linux/pipe_fs_i.h>
49 #include <linux/audit.h> /* for audit_free() */
50 #include <linux/resource.h>
51 #include <linux/task_io_accounting_ops.h>
52 #include <linux/blkdev.h>
53 #include <linux/task_work.h>
54 #include <linux/fs_struct.h>
55 #include <linux/init_task.h>
56 #include <linux/perf_event.h>
57 #include <trace/events/sched.h>
58 #include <linux/hw_breakpoint.h>
59 #include <linux/oom.h>
60 #include <linux/writeback.h>
61 #include <linux/shm.h>
62 #include <linux/kcov.h>
63 #include <linux/kmsan.h>
64 #include <linux/random.h>
65 #include <linux/rcuwait.h>
66 #include <linux/compat.h>
67 #include <linux/io_uring.h>
68 #include <linux/kprobes.h>
69 #include <linux/rethook.h>
70 #include <linux/sysfs.h>
71
72 #include <linux/uaccess.h>
73 #include <asm/unistd.h>
74 #include <asm/mmu_context.h>
75 #include <trace/hooks/mm.h>
76 #include <trace/hooks/dtask.h>
77
78 /*
79 * The default value should be high enough to not crash a system that randomly
80 * crashes its kernel from time to time, but low enough to at least not permit
81 * overflowing 32-bit refcounts or the ldsem writer count.
82 */
83 static unsigned int oops_limit = 10000;
84
85 #ifdef CONFIG_SYSCTL
86 static struct ctl_table kern_exit_table[] = {
87 {
88 .procname = "oops_limit",
89 .data = &oops_limit,
90 .maxlen = sizeof(oops_limit),
91 .mode = 0644,
92 .proc_handler = proc_douintvec,
93 },
94 { }
95 };
96
97 static __init int kernel_exit_sysctls_init(void)
98 {
99 register_sysctl_init("kernel", kern_exit_table);
100 return 0;
101 }
102 late_initcall(kernel_exit_sysctls_init);
103 #endif
104
105 static atomic_t oops_count = ATOMIC_INIT(0);
106
107 #ifdef CONFIG_SYSFS
108 static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
109 char *page)
110 {
111 return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
112 }
113
114 static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
115
116 static __init int kernel_exit_sysfs_init(void)
117 {
118 sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
119 return 0;
120 }
121 late_initcall(kernel_exit_sysfs_init);
122 #endif
123
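/*
 * Detach @p from the pid hash for PIDTYPE_PID and, if @group_dead, from
 * the TGID/PGID/SID pid types and the global task and sibling lists.
 * Called from __exit_signal() with tasklist_lock write-held and
 * @p->sighand->siglock held.
 */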
124 static void __unhash_process(struct task_struct *p, bool group_dead)
125 {
126 nr_threads--;
127 detach_pid(p, PIDTYPE_PID);
128 if (group_dead) {
129 detach_pid(p, PIDTYPE_TGID);
130 detach_pid(p, PIDTYPE_PGID);
131 detach_pid(p, PIDTYPE_SID);
132
133 list_del_rcu(&p->tasks);
134 list_del_init(&p->sibling);
135 __this_cpu_dec(process_counts);
136 }
137 list_del_rcu(&p->thread_group);
138 list_del_rcu(&p->thread_node);
139 }
140
141 /*
142 * This function expects the tasklist_lock write-locked.
143 */
144 static void __exit_signal(struct task_struct *tsk)
145 {
146 struct signal_struct *sig = tsk->signal;
147 bool group_dead = thread_group_leader(tsk);
148 struct sighand_struct *sighand;
149 struct tty_struct *tty;
150 u64 utime, stime;
151
152 sighand = rcu_dereference_check(tsk->sighand,
153 lockdep_tasklist_lock_is_held());
154 spin_lock(&sighand->siglock);
155
156 #ifdef CONFIG_POSIX_TIMERS
157 posix_cpu_timers_exit(tsk);
158 if (group_dead)
159 posix_cpu_timers_exit_group(tsk);
160 #endif
161
162 if (group_dead) {
163 tty = sig->tty;
164 sig->tty = NULL;
165 } else {
166 /*
167 * If there is any task waiting for the group exit
168 * then notify it:
169 */
170 if (sig->notify_count > 0 && !--sig->notify_count)
171 wake_up_process(sig->group_exec_task);
172
173 if (tsk == sig->curr_target)
174 sig->curr_target = next_thread(tsk);
175 }
176
177 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
178 sizeof(unsigned long long));
179
180 /*
181 * Accumulate here the counters for all threads as they die. We could
182 * skip the group leader because it is the last user of signal_struct,
183 * but we want to avoid the race with thread_group_cputime() which can
184 * see the empty ->thread_head list.
185 */
186 task_cputime(tsk, &utime, &stime);
187 write_seqlock(&sig->stats_lock);
188 sig->utime += utime;
189 sig->stime += stime;
190 sig->gtime += task_gtime(tsk);
191 sig->min_flt += tsk->min_flt;
192 sig->maj_flt += tsk->maj_flt;
193 sig->nvcsw += tsk->nvcsw;
194 sig->nivcsw += tsk->nivcsw;
195 sig->inblock += task_io_get_inblock(tsk);
196 sig->oublock += task_io_get_oublock(tsk);
197 task_io_accounting_add(&sig->ioac, &tsk->ioac);
198 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
199 sig->nr_threads--;
200 __unhash_process(tsk, group_dead);
201 write_sequnlock(&sig->stats_lock);
202
203 /*
204 * Do this under ->siglock, we can race with another thread
205 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
206 */
207 flush_sigqueue(&tsk->pending);
208 tsk->sighand = NULL;
209 spin_unlock(&sighand->siglock);
210
211 __cleanup_sighand(sighand);
212 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
213 if (group_dead) {
214 flush_sigqueue(&sig->shared_pending);
215 tty_kref_put(tty);
216 }
217 }
218
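/*
 * RCU callback queued by put_task_struct_rcu_user(): runs the deferred
 * per-task cleanups and drops the task_struct reference once a grace
 * period has passed.
 */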
219 static void delayed_put_task_struct(struct rcu_head *rhp)
220 {
221 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
222
223 kprobe_flush_task(tsk);
224 rethook_flush_task(tsk);
225 perf_event_delayed_put(tsk);
226 trace_sched_process_free(tsk);
227 put_task_struct(tsk);
228 }
229
230 void put_task_struct_rcu_user(struct task_struct *task)
231 {
232 if (refcount_dec_and_test(&task->rcu_users))
233 call_rcu(&task->rcu, delayed_put_task_struct);
234 }
235
236 void __weak release_thread(struct task_struct *dead_task)
237 {
238 }
239
240 void release_task(struct task_struct *p)
241 {
242 struct task_struct *leader;
243 struct pid *thread_pid;
244 int zap_leader;
245 repeat:
246 /* don't need to get the RCU readlock here - the process is dead and
247 * can't be modifying its own credentials. But shut RCU-lockdep up */
248 rcu_read_lock();
249 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
250 rcu_read_unlock();
251
252 cgroup_release(p);
253
254 write_lock_irq(&tasklist_lock);
255 ptrace_release_task(p);
256 thread_pid = get_pid(p->thread_pid);
257 __exit_signal(p);
258
259 /*
260 * If we are the last non-leader member of the thread
261 * group, and the leader is zombie, then notify the
262 * group leader's parent process. (if it wants notification.)
263 */
264 zap_leader = 0;
265 leader = p->group_leader;
266 if (leader != p && thread_group_empty(leader)
267 && leader->exit_state == EXIT_ZOMBIE) {
268 /*
269 * If we were the last child thread and the leader has
270 * exited already, and the leader's parent ignores SIGCHLD,
271 * then we are the one who should release the leader.
272 */
273 zap_leader = do_notify_parent(leader, leader->exit_signal);
274 if (zap_leader)
275 leader->exit_state = EXIT_DEAD;
276 }
277
278 write_unlock_irq(&tasklist_lock);
279 seccomp_filter_release(p);
280 proc_flush_pid(thread_pid);
281 put_pid(thread_pid);
282 release_thread(p);
283 put_task_struct_rcu_user(p);
284
285 p = leader;
286 if (unlikely(zap_leader))
287 goto repeat;
288 }
289
290 int rcuwait_wake_up(struct rcuwait *w)
291 {
292 int ret = 0;
293 struct task_struct *task;
294
295 rcu_read_lock();
296
297 /*
298 * Order condition vs @task, such that everything prior to the load
299 * of @task is visible. This is the condition as to why the user called
300 * rcuwait_wake() in the first place. Pairs with set_current_state()
301 * barrier (A) in rcuwait_wait_event().
302 *
303 * WAIT WAKE
304 * [S] tsk = current [S] cond = true
305 * MB (A) MB (B)
306 * [L] cond [L] tsk
307 */
308 smp_mb(); /* (B) */
309
310 task = rcu_dereference(w->task);
311 if (task)
312 ret = wake_up_process(task);
313 rcu_read_unlock();
314
315 return ret;
316 }
317 EXPORT_SYMBOL_GPL(rcuwait_wake_up);
318
319 /*
320 * Determine if a process group is "orphaned", according to the POSIX
321 * definition in 2.2.2.52. Orphaned process groups are not to be affected
322 * by terminal-generated stop signals. Newly orphaned process groups are
323 * to receive a SIGHUP and a SIGCONT.
324 *
325 * "I ask you, have you ever known what it is to be an orphan?"
326 */
327 static int will_become_orphaned_pgrp(struct pid *pgrp,
328 struct task_struct *ignored_task)
329 {
330 struct task_struct *p;
331
332 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
333 if ((p == ignored_task) ||
334 (p->exit_state && thread_group_empty(p)) ||
335 is_global_init(p->real_parent))
336 continue;
337
338 if (task_pgrp(p->real_parent) != pgrp &&
339 task_session(p->real_parent) == task_session(p))
340 return 0;
341 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
342
343 return 1;
344 }
345
346 int is_current_pgrp_orphaned(void)
347 {
348 int retval;
349
350 read_lock(&tasklist_lock);
351 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
352 read_unlock(&tasklist_lock);
353
354 return retval;
355 }
356
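/*
 * Return true if any process in @pgrp is in the job-control stopped state.
 */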
357 static bool has_stopped_jobs(struct pid *pgrp)
358 {
359 struct task_struct *p;
360
361 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
362 if (p->signal->flags & SIGNAL_STOP_STOPPED)
363 return true;
364 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
365
366 return false;
367 }
368
369 /*
370 * Check to see if any process groups have become orphaned as
371 * a result of our exiting, and if they have any stopped jobs,
372 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
373 */
374 static void
375 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
376 {
377 struct pid *pgrp = task_pgrp(tsk);
378 struct task_struct *ignored_task = tsk;
379
380 if (!parent)
381 /* exit: our father is in a different pgrp than
382 * we are and we were the only connection outside.
383 */
384 parent = tsk->real_parent;
385 else
386 /* reparent: our child is in a different pgrp than
387 * we are, and it was the only connection outside.
388 */
389 ignored_task = NULL;
390
391 if (task_pgrp(parent) != pgrp &&
392 task_session(parent) == task_session(tsk) &&
393 will_become_orphaned_pgrp(pgrp, ignored_task) &&
394 has_stopped_jobs(pgrp)) {
395 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
396 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
397 }
398 }
399
400 static void coredump_task_exit(struct task_struct *tsk)
401 {
402 struct core_state *core_state;
403
404 /*
405 * Serialize with any possible pending coredump.
406 * We must hold siglock around checking core_state
407 * and setting PF_POSTCOREDUMP. The core-inducing thread
408 * will increment ->nr_threads for each thread in the
409 * group without PF_POSTCOREDUMP set.
410 */
411 spin_lock_irq(&tsk->sighand->siglock);
412 tsk->flags |= PF_POSTCOREDUMP;
413 core_state = tsk->signal->core_state;
414 spin_unlock_irq(&tsk->sighand->siglock);
415 if (core_state) {
416 struct core_thread self;
417
418 self.task = current;
419 if (self.task->flags & PF_SIGNALED)
420 self.next = xchg(&core_state->dumper.next, &self);
421 else
422 self.task = NULL;
423 /*
424 * Implies mb(), the result of xchg() must be visible
425 * to core_state->dumper.
426 */
427 if (atomic_dec_and_test(&core_state->nr_threads))
428 complete(&core_state->startup);
429
430 for (;;) {
431 set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
432 if (!self.task) /* see coredump_finish() */
433 break;
434 schedule();
435 }
436 __set_current_state(TASK_RUNNING);
437 }
438 }
439
440 #ifdef CONFIG_MEMCG
441 /*
442 * A task is exiting. If it owned this mm, find a new owner for the mm.
443 */
444 void mm_update_next_owner(struct mm_struct *mm)
445 {
446 struct task_struct *c, *g, *p = current;
447
448 retry:
449 /*
450 * If the exiting or execing task is not the owner, it's
451 * someone else's problem.
452 */
453 if (mm->owner != p)
454 return;
455 /*
456 * The current owner is exiting/execing and there are no other
457 * candidates. Do not leave the mm pointing to a possibly
458 * freed task structure.
459 */
460 if (atomic_read(&mm->mm_users) <= 1) {
461 WRITE_ONCE(mm->owner, NULL);
462 return;
463 }
464
465 read_lock(&tasklist_lock);
466 /*
467 * Search in the children
468 */
469 list_for_each_entry(c, &p->children, sibling) {
470 if (c->mm == mm)
471 goto assign_new_owner;
472 }
473
474 /*
475 * Search in the siblings
476 */
477 list_for_each_entry(c, &p->real_parent->children, sibling) {
478 if (c->mm == mm)
479 goto assign_new_owner;
480 }
481
482 /*
483 * Search through everything else, we should not get here often.
484 */
485 for_each_process(g) {
486 if (g->flags & PF_KTHREAD)
487 continue;
488 for_each_thread(g, c) {
489 if (c->mm == mm)
490 goto assign_new_owner;
491 if (c->mm)
492 break;
493 }
494 }
495 read_unlock(&tasklist_lock);
496 /*
497 * We found no owner yet mm_users > 1: this implies that we are
498 * most likely racing with swapoff (try_to_unuse()) or /proc or
499 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
500 */
501 WRITE_ONCE(mm->owner, NULL);
502 return;
503
504 assign_new_owner:
505 BUG_ON(c == p);
506 get_task_struct(c);
507 /*
508 * The task_lock protects c->mm from changing.
509 * We always want mm->owner->mm == mm
510 */
511 task_lock(c);
512 /*
513 * Delay read_unlock() till we have the task_lock()
514 * to ensure that c does not slip away underneath us
515 */
516 read_unlock(&tasklist_lock);
517 if (c->mm != mm) {
518 task_unlock(c);
519 put_task_struct(c);
520 goto retry;
521 }
522 WRITE_ONCE(mm->owner, c);
523 lru_gen_migrate_mm(mm);
524 task_unlock(c);
525 put_task_struct(c);
526 }
527 #endif /* CONFIG_MEMCG */
528
529 /*
530 * Turn us into a lazy TLB process if we
531 * aren't already..
532 */
533 static void exit_mm(void)
534 {
535 struct mm_struct *mm = current->mm;
536
537 exit_mm_release(current, mm);
538 if (!mm)
539 return;
540 sync_mm_rss(mm);
541 mmap_read_lock(mm);
542 mmgrab(mm);
543 BUG_ON(mm != current->active_mm);
544 /* more a memory barrier than a real lock */
545 task_lock(current);
546 /*
547 * When a thread stops operating on an address space, the loop
548 * in membarrier_private_expedited() may not observe that
549 * tsk->mm, and the loop in membarrier_global_expedited() may
550 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
551 * rq->membarrier_state, so those would not issue an IPI.
552 * Membarrier requires a memory barrier after accessing
553 * user-space memory, before clearing tsk->mm or the
554 * rq->membarrier_state.
555 */
556 smp_mb__after_spinlock();
557 local_irq_disable();
558 current->mm = NULL;
559 membarrier_update_current_mm(NULL);
560 enter_lazy_tlb(mm, current);
561 local_irq_enable();
562 task_unlock(current);
563 mmap_read_unlock(mm);
564 mm_update_next_owner(mm);
565 trace_android_vh_exit_mm(mm);
566 mmput(mm);
567 if (test_thread_flag(TIF_MEMDIE))
568 exit_oom_victim();
569 }
570
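/*
 * Return the first thread in @p's thread group that is not already
 * exiting (PF_EXITING clear), or NULL if every thread is on its way out.
 */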
571 static struct task_struct *find_alive_thread(struct task_struct *p)
572 {
573 struct task_struct *t;
574
575 for_each_thread(p, t) {
576 if (!(t->flags & PF_EXITING))
577 return t;
578 }
579 return NULL;
580 }
581
582 static struct task_struct *find_child_reaper(struct task_struct *father,
583 struct list_head *dead)
584 __releases(&tasklist_lock)
585 __acquires(&tasklist_lock)
586 {
587 struct pid_namespace *pid_ns = task_active_pid_ns(father);
588 struct task_struct *reaper = pid_ns->child_reaper;
589 struct task_struct *p, *n;
590
591 if (likely(reaper != father))
592 return reaper;
593
594 reaper = find_alive_thread(father);
595 if (reaper) {
596 pid_ns->child_reaper = reaper;
597 return reaper;
598 }
599
600 write_unlock_irq(&tasklist_lock);
601
602 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
603 list_del_init(&p->ptrace_entry);
604 release_task(p);
605 }
606
607 zap_pid_ns_processes(pid_ns);
608 write_lock_irq(&tasklist_lock);
609
610 return father;
611 }
612
613 /*
614 * When we die, we re-parent all our children, and try to:
615 * 1. give them to another thread in our thread group, if such a member exists
616 * 2. give it to the first ancestor process which prctl'd itself as a
617 * child_subreaper for its children (like a service manager)
618 * 3. give it to the init process (PID 1) in our pid namespace
619 */
620 static struct task_struct *find_new_reaper(struct task_struct *father,
621 struct task_struct *child_reaper)
622 {
623 struct task_struct *thread, *reaper;
624
625 thread = find_alive_thread(father);
626 if (thread)
627 return thread;
628
629 if (father->signal->has_child_subreaper) {
630 unsigned int ns_level = task_pid(father)->level;
631 /*
632 * Find the first ->is_child_subreaper ancestor in our pid_ns.
633 * We can't check reaper != child_reaper to ensure we do not
634 * cross the namespaces, the exiting parent could be injected
635 * by setns() + fork().
636 * We check pid->level, this is slightly more efficient than
637 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
638 */
639 for (reaper = father->real_parent;
640 task_pid(reaper)->level == ns_level;
641 reaper = reaper->real_parent) {
642 if (reaper == &init_task)
643 break;
644 if (!reaper->signal->is_child_subreaper)
645 continue;
646 thread = find_alive_thread(reaper);
647 if (thread)
648 return thread;
649 }
650 }
651
652 return child_reaper;
653 }
654
655 /*
656 * Any that need to be release_task'd are put on the @dead list.
657 */
658 static void reparent_leader(struct task_struct *father, struct task_struct *p,
659 struct list_head *dead)
660 {
661 if (unlikely(p->exit_state == EXIT_DEAD))
662 return;
663
664 /* We don't want people slaying init. */
665 p->exit_signal = SIGCHLD;
666
667 /* If it has exited notify the new parent about this child's death. */
668 if (!p->ptrace &&
669 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
670 if (do_notify_parent(p, p->exit_signal)) {
671 p->exit_state = EXIT_DEAD;
672 list_add(&p->ptrace_entry, dead);
673 }
674 }
675
676 kill_orphaned_pgrp(p, father);
677 }
678
679 /*
680 * This does two things:
681 *
682 * A. Make init inherit all the child processes
683 * B. Check to see if any process groups have become orphaned
684 * as a result of our exiting, and if they have any stopped
685 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
686 */
687 static void forget_original_parent(struct task_struct *father,
688 struct list_head *dead)
689 {
690 struct task_struct *p, *t, *reaper;
691
692 if (unlikely(!list_empty(&father->ptraced)))
693 exit_ptrace(father, dead);
694
695 /* Can drop and reacquire tasklist_lock */
696 reaper = find_child_reaper(father, dead);
697 if (list_empty(&father->children))
698 return;
699
700 reaper = find_new_reaper(father, reaper);
701 list_for_each_entry(p, &father->children, sibling) {
702 for_each_thread(p, t) {
703 RCU_INIT_POINTER(t->real_parent, reaper);
704 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
705 if (likely(!t->ptrace))
706 t->parent = t->real_parent;
707 if (t->pdeath_signal)
708 group_send_sig_info(t->pdeath_signal,
709 SEND_SIG_NOINFO, t,
710 PIDTYPE_TGID);
711 }
712 /*
713 * If this is a threaded reparent there is no need to
714 * notify anyone anything has happened.
715 */
716 if (!same_thread_group(reaper, father))
717 reparent_leader(father, p, dead);
718 }
719 list_splice_tail_init(&father->children, &reaper->children);
720 }
721
722 /*
723 * Send signals to all our closest relatives so that they know
724 * to properly mourn us..
725 */
726 static void exit_notify(struct task_struct *tsk, int group_dead)
727 {
728 bool autoreap;
729 struct task_struct *p, *n;
730 LIST_HEAD(dead);
731
732 write_lock_irq(&tasklist_lock);
733 forget_original_parent(tsk, &dead);
734
735 if (group_dead)
736 kill_orphaned_pgrp(tsk->group_leader, NULL);
737
738 tsk->exit_state = EXIT_ZOMBIE;
739 if (unlikely(tsk->ptrace)) {
740 int sig = thread_group_leader(tsk) &&
741 thread_group_empty(tsk) &&
742 !ptrace_reparented(tsk) ?
743 tsk->exit_signal : SIGCHLD;
744 autoreap = do_notify_parent(tsk, sig);
745 } else if (thread_group_leader(tsk)) {
746 autoreap = thread_group_empty(tsk) &&
747 do_notify_parent(tsk, tsk->exit_signal);
748 } else {
749 autoreap = true;
750 }
751
752 if (autoreap) {
753 tsk->exit_state = EXIT_DEAD;
754 list_add(&tsk->ptrace_entry, &dead);
755 }
756
757 /* mt-exec, de_thread() is waiting for group leader */
758 if (unlikely(tsk->signal->notify_count < 0))
759 wake_up_process(tsk->signal->group_exec_task);
760 write_unlock_irq(&tasklist_lock);
761
762 list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
763 list_del_init(&p->ptrace_entry);
764 release_task(p);
765 }
766 }
767
768 #ifdef CONFIG_DEBUG_STACK_USAGE
769 static void check_stack_usage(void)
770 {
771 static DEFINE_SPINLOCK(low_water_lock);
772 static int lowest_to_date = THREAD_SIZE;
773 unsigned long free;
774
775 free = stack_not_used(current);
776
777 if (free >= lowest_to_date)
778 return;
779
780 spin_lock(&low_water_lock);
781 if (free < lowest_to_date) {
782 pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
783 current->comm, task_pid_nr(current), free);
784 lowest_to_date = free;
785 }
786 spin_unlock(&low_water_lock);
787 }
788 #else
789 static inline void check_stack_usage(void) {}
790 #endif
791
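/*
 * Account this thread's exit against signal->quick_threads; the last
 * thread to get here marks the whole group as exiting with @code unless
 * a group exit is already in progress.
 */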
792 static void synchronize_group_exit(struct task_struct *tsk, long code)
793 {
794 struct sighand_struct *sighand = tsk->sighand;
795 struct signal_struct *signal = tsk->signal;
796
797 spin_lock_irq(&sighand->siglock);
798 signal->quick_threads--;
799 if ((signal->quick_threads == 0) &&
800 !(signal->flags & SIGNAL_GROUP_EXIT)) {
801 signal->flags = SIGNAL_GROUP_EXIT;
802 signal->group_exit_code = code;
803 signal->group_stop_count = 0;
804 }
805 spin_unlock_irq(&sighand->siglock);
806 }
807
808 void __noreturn do_exit(long code)
809 {
810 struct task_struct *tsk = current;
811 int group_dead;
812
813 WARN_ON(irqs_disabled());
814
815 synchronize_group_exit(tsk, code);
816
817 WARN_ON(tsk->plug);
818
819 profile_task_exit(tsk);
820 kcov_task_exit(tsk);
821 kmsan_task_exit(tsk);
822
823 coredump_task_exit(tsk);
824 ptrace_event(PTRACE_EVENT_EXIT, code);
825
826 validate_creds_for_do_exit(tsk);
827
828 io_uring_files_cancel();
829 exit_signals(tsk); /* sets PF_EXITING */
830
831 trace_android_vh_exit_check(current);
832
833 /* sync mm's RSS info before statistics gathering */
834 if (tsk->mm)
835 sync_mm_rss(tsk->mm);
836 acct_update_integrals(tsk);
837 group_dead = atomic_dec_and_test(&tsk->signal->live);
838 if (group_dead) {
839 /*
840 * If the last thread of global init has exited, panic
841 * immediately to get a usable coredump.
842 */
843 if (unlikely(is_global_init(tsk)))
844 panic("Attempted to kill init! exitcode=0x%08x\n",
845 tsk->signal->group_exit_code ?: (int)code);
846
847 #ifdef CONFIG_POSIX_TIMERS
848 hrtimer_cancel(&tsk->signal->real_timer);
849 exit_itimers(tsk);
850 #endif
851 if (tsk->mm)
852 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
853 }
854 acct_collect(code, group_dead);
855 if (group_dead)
856 tty_audit_exit();
857 audit_free(tsk);
858
859 tsk->exit_code = code;
860 taskstats_exit(tsk, group_dead);
861
862 exit_mm();
863
864 if (group_dead)
865 acct_process();
866 trace_sched_process_exit(tsk);
867
868 exit_sem(tsk);
869 exit_shm(tsk);
870 exit_files(tsk);
871 exit_fs(tsk);
872 if (group_dead)
873 disassociate_ctty(1);
874 exit_task_namespaces(tsk);
875 exit_task_work(tsk);
876 exit_thread(tsk);
877
878 /*
879 * Flush inherited counters to the parent - before the parent
880 * gets woken up by child-exit notifications.
881 *
882 * because of cgroup mode, must be called before cgroup_exit()
883 */
884 perf_event_exit_task(tsk);
885
886 sched_autogroup_exit_task(tsk);
887 cgroup_exit(tsk);
888
889 /*
890 * FIXME: do that only when needed, using sched_exit tracepoint
891 */
892 flush_ptrace_hw_breakpoint(tsk);
893
894 exit_tasks_rcu_start();
895 exit_notify(tsk, group_dead);
896 proc_exit_connector(tsk);
897 mpol_put_task_policy(tsk);
898 #ifdef CONFIG_FUTEX
899 if (unlikely(current->pi_state_cache))
900 kfree(current->pi_state_cache);
901 #endif
902 /*
903 * Make sure we are holding no locks:
904 */
905 debug_check_no_locks_held();
906
907 if (tsk->io_context)
908 exit_io_context(tsk);
909
910 if (tsk->splice_pipe)
911 free_pipe_info(tsk->splice_pipe);
912
913 if (tsk->task_frag.page)
914 put_page(tsk->task_frag.page);
915
916 validate_creds_for_do_exit(tsk);
917 exit_task_stack_account(tsk);
918
919 check_stack_usage();
920 preempt_disable();
921 if (tsk->nr_dirtied)
922 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
923 exit_rcu();
924 exit_tasks_rcu_finish();
925
926 lockdep_free_task(tsk);
927 do_task_dead();
928 }
929
930 void __noreturn make_task_dead(int signr)
931 {
932 /*
933 * Take the task off the cpu after something catastrophic has
934 * happened.
935 *
936 * We can get here from a kernel oops, sometimes with preemption off.
937 * Start by checking for critical errors.
938 * Then fix up important state like USER_DS and preemption.
939 * Then do everything else.
940 */
941 struct task_struct *tsk = current;
942 unsigned int limit;
943
944 if (unlikely(in_interrupt()))
945 panic("Aiee, killing interrupt handler!");
946 if (unlikely(!tsk->pid))
947 panic("Attempted to kill the idle task!");
948
949 if (unlikely(irqs_disabled())) {
950 pr_info("note: %s[%d] exited with irqs disabled\n",
951 current->comm, task_pid_nr(current));
952 local_irq_enable();
953 }
954 if (unlikely(in_atomic())) {
955 pr_info("note: %s[%d] exited with preempt_count %d\n",
956 current->comm, task_pid_nr(current),
957 preempt_count());
958 preempt_count_set(PREEMPT_ENABLED);
959 }
960
961 /*
962 * Every time the system oopses, if the oops happens while a reference
963 * to an object was held, the reference leaks.
964 * If the oops doesn't also leak memory, repeated oopsing can cause
965 * reference counters to wrap around (if they're not using refcount_t).
966 * This means that repeated oopsing can make unexploitable-looking bugs
967 * exploitable through repeated oopsing.
968 * To make sure this can't happen, place an upper bound on how often the
969 * kernel may oops without panic().
970 */
971 limit = READ_ONCE(oops_limit);
972 if (atomic_inc_return(&oops_count) >= limit && limit)
973 panic("Oopsed too often (kernel.oops_limit is %d)", limit);
974
975 /*
976 * We're taking recursive faults here in make_task_dead. Safest is to just
977 * leave this task alone and wait for reboot.
978 */
979 if (unlikely(tsk->flags & PF_EXITING)) {
980 pr_alert("Fixing recursive fault but reboot is needed!\n");
981 futex_exit_recursive(tsk);
982 tsk->exit_state = EXIT_DEAD;
983 refcount_inc(&tsk->rcu_users);
984 do_task_dead();
985 }
986
987 do_exit(signr);
988 }
989
990 SYSCALL_DEFINE1(exit, int, error_code)
991 {
992 do_exit((error_code&0xff)<<8);
993 }
994
995 /*
996 * Take down every thread in the group. This is called by fatal signals
997 * as well as by sys_exit_group (below).
998 */
999 void __noreturn
1000 do_group_exit(int exit_code)
1001 {
1002 struct signal_struct *sig = current->signal;
1003
1004 if (sig->flags & SIGNAL_GROUP_EXIT)
1005 exit_code = sig->group_exit_code;
1006 else if (sig->group_exec_task)
1007 exit_code = 0;
1008 else {
1009 struct sighand_struct *const sighand = current->sighand;
1010
1011 spin_lock_irq(&sighand->siglock);
1012 if (sig->flags & SIGNAL_GROUP_EXIT)
1013 /* Another thread got here before we took the lock. */
1014 exit_code = sig->group_exit_code;
1015 else if (sig->group_exec_task)
1016 exit_code = 0;
1017 else {
1018 sig->group_exit_code = exit_code;
1019 sig->flags = SIGNAL_GROUP_EXIT;
1020 zap_other_threads(current);
1021 }
1022 spin_unlock_irq(&sighand->siglock);
1023 }
1024
1025 do_exit(exit_code);
1026 /* NOTREACHED */
1027 }
1028
1029 /*
1030 * this kills every thread in the thread group. Note that any externally
1031 * wait4()-ing process will get the correct exit code - even if this
1032 * thread is not the thread group leader.
1033 */
1034 SYSCALL_DEFINE1(exit_group, int, error_code)
1035 {
1036 do_group_exit((error_code & 0xff) << 8);
1037 /* NOTREACHED */
1038 return 0;
1039 }
1040
1041 struct waitid_info {
1042 pid_t pid;
1043 uid_t uid;
1044 int status;
1045 int cause;
1046 };
1047
1048 struct wait_opts {
1049 enum pid_type wo_type;
1050 int wo_flags;
1051 struct pid *wo_pid;
1052
1053 struct waitid_info *wo_info;
1054 int wo_stat;
1055 struct rusage *wo_rusage;
1056
1057 wait_queue_entry_t child_wait;
1058 int notask_error;
1059 };
1060
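/*
 * Return nonzero if @p matches the pid the waiter asked for (PIDTYPE_MAX
 * means "any child", as for waitpid(-1)).
 */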
1061 static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1062 {
1063 return wo->wo_type == PIDTYPE_MAX ||
1064 task_pid_type(p, wo->wo_type) == wo->wo_pid;
1065 }
1066
1067 static int
1068 eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
1069 {
1070 if (!eligible_pid(wo, p))
1071 return 0;
1072
1073 /*
1074 * Wait for all children (clone and not) if __WALL is set or
1075 * if it is traced by us.
1076 */
1077 if (ptrace || (wo->wo_flags & __WALL))
1078 return 1;
1079
1080 /*
1081 * Otherwise, wait for clone children *only* if __WCLONE is set;
1082 * otherwise, wait for non-clone children *only*.
1083 *
1084 * Note: a "clone" child here is one that reports to its parent
1085 * using a signal other than SIGCHLD, or a non-leader thread which
1086 * we can only see if it is traced by us.
1087 */
1088 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
1089 return 0;
1090
1091 return 1;
1092 }
1093
1094 /*
1095 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
1096 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1097 * the lock and this task is uninteresting. If we return nonzero, we have
1098 * released the lock and the system call should return.
1099 */
1100 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1101 {
1102 int state, status;
1103 pid_t pid = task_pid_vnr(p);
1104 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
1105 struct waitid_info *infop;
1106
1107 if (!likely(wo->wo_flags & WEXITED))
1108 return 0;
1109
1110 if (unlikely(wo->wo_flags & WNOWAIT)) {
1111 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1112 ? p->signal->group_exit_code : p->exit_code;
1113 get_task_struct(p);
1114 read_unlock(&tasklist_lock);
1115 sched_annotate_sleep();
1116 if (wo->wo_rusage)
1117 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1118 put_task_struct(p);
1119 goto out_info;
1120 }
1121 /*
1122 * Move the task's state to DEAD/TRACE, only one thread can do this.
1123 */
1124 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1125 EXIT_TRACE : EXIT_DEAD;
1126 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1127 return 0;
1128 /*
1129 * We own this thread, nobody else can reap it.
1130 */
1131 read_unlock(&tasklist_lock);
1132 sched_annotate_sleep();
1133
1134 /*
1135 * Check thread_group_leader() to exclude the traced sub-threads.
1136 */
1137 if (state == EXIT_DEAD && thread_group_leader(p)) {
1138 struct signal_struct *sig = p->signal;
1139 struct signal_struct *psig = current->signal;
1140 unsigned long maxrss;
1141 u64 tgutime, tgstime;
1142
1143 /*
1144 * The resource counters for the group leader are in its
1145 * own task_struct. Those for dead threads in the group
1146 * are in its signal_struct, as are those for the child
1147 * processes it has previously reaped. All these
1148 * accumulate in the parent's signal_struct c* fields.
1149 *
1150 * We don't bother to take a lock here to protect these
1151 * p->signal fields because the whole thread group is dead
1152 * and nobody can change them.
1153 *
1154 * psig->stats_lock also protects us from our sub-threads
1155 * which can reap other children at the same time. Until
1156 * we change k_getrusage()-like users to rely on this lock
1157 * we have to take ->siglock as well.
1158 *
1159 * We use thread_group_cputime_adjusted() to get times for
1160 * the thread group, which consolidates times for all threads
1161 * in the group including the group leader.
1162 */
1163 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1164 spin_lock_irq(&current->sighand->siglock);
1165 write_seqlock(&psig->stats_lock);
1166 psig->cutime += tgutime + sig->cutime;
1167 psig->cstime += tgstime + sig->cstime;
1168 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
1169 psig->cmin_flt +=
1170 p->min_flt + sig->min_flt + sig->cmin_flt;
1171 psig->cmaj_flt +=
1172 p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1173 psig->cnvcsw +=
1174 p->nvcsw + sig->nvcsw + sig->cnvcsw;
1175 psig->cnivcsw +=
1176 p->nivcsw + sig->nivcsw + sig->cnivcsw;
1177 psig->cinblock +=
1178 task_io_get_inblock(p) +
1179 sig->inblock + sig->cinblock;
1180 psig->coublock +=
1181 task_io_get_oublock(p) +
1182 sig->oublock + sig->coublock;
1183 maxrss = max(sig->maxrss, sig->cmaxrss);
1184 if (psig->cmaxrss < maxrss)
1185 psig->cmaxrss = maxrss;
1186 task_io_accounting_add(&psig->ioac, &p->ioac);
1187 task_io_accounting_add(&psig->ioac, &sig->ioac);
1188 write_sequnlock(&psig->stats_lock);
1189 spin_unlock_irq(&current->sighand->siglock);
1190 }
1191
1192 if (wo->wo_rusage)
1193 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1194 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1195 ? p->signal->group_exit_code : p->exit_code;
1196 wo->wo_stat = status;
1197
1198 if (state == EXIT_TRACE) {
1199 write_lock_irq(&tasklist_lock);
1200 /* We dropped tasklist, ptracer could die and untrace */
1201 ptrace_unlink(p);
1202
1203 /* If parent wants a zombie, don't release it now */
1204 state = EXIT_ZOMBIE;
1205 if (do_notify_parent(p, p->exit_signal))
1206 state = EXIT_DEAD;
1207 p->exit_state = state;
1208 write_unlock_irq(&tasklist_lock);
1209 }
1210 if (state == EXIT_DEAD)
1211 release_task(p);
1212
1213 out_info:
1214 infop = wo->wo_info;
1215 if (infop) {
1216 if ((status & 0x7f) == 0) {
1217 infop->cause = CLD_EXITED;
1218 infop->status = status >> 8;
1219 } else {
1220 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1221 infop->status = status & 0x7f;
1222 }
1223 infop->pid = pid;
1224 infop->uid = uid;
1225 }
1226
1227 return pid;
1228 }
1229
1230 static int *task_stopped_code(struct task_struct *p, bool ptrace)
1231 {
1232 if (ptrace) {
1233 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
1234 return &p->exit_code;
1235 } else {
1236 if (p->signal->flags & SIGNAL_STOP_STOPPED)
1237 return &p->signal->group_exit_code;
1238 }
1239 return NULL;
1240 }
1241
1242 /**
1243 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1244 * @wo: wait options
1245 * @ptrace: is the wait for ptrace
1246 * @p: task to wait for
1247 *
1248 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
1249 *
1250 * CONTEXT:
1251 * read_lock(&tasklist_lock), which is released if return value is
1252 * non-zero. Also, grabs and releases @p->sighand->siglock.
1253 *
1254 * RETURNS:
1255 * 0 if wait condition didn't exist and search for other wait conditions
1256 * should continue. Non-zero return, -errno on failure and @p's pid on
1257 * success, implies that tasklist_lock is released and wait condition
1258 * search should terminate.
1259 */
1260 static int wait_task_stopped(struct wait_opts *wo,
1261 int ptrace, struct task_struct *p)
1262 {
1263 struct waitid_info *infop;
1264 int exit_code, *p_code, why;
1265 uid_t uid = 0; /* unneeded, required by compiler */
1266 pid_t pid;
1267
1268 /*
1269 * Traditionally we see ptrace'd stopped tasks regardless of options.
1270 */
1271 if (!ptrace && !(wo->wo_flags & WUNTRACED))
1272 return 0;
1273
1274 if (!task_stopped_code(p, ptrace))
1275 return 0;
1276
1277 exit_code = 0;
1278 spin_lock_irq(&p->sighand->siglock);
1279
1280 p_code = task_stopped_code(p, ptrace);
1281 if (unlikely(!p_code))
1282 goto unlock_sig;
1283
1284 exit_code = *p_code;
1285 if (!exit_code)
1286 goto unlock_sig;
1287
1288 if (!unlikely(wo->wo_flags & WNOWAIT))
1289 *p_code = 0;
1290
1291 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1292 unlock_sig:
1293 spin_unlock_irq(&p->sighand->siglock);
1294 if (!exit_code)
1295 return 0;
1296
1297 /*
1298 * Now we are pretty sure this task is interesting.
1299 * Make sure it doesn't get reaped out from under us while we
1300 * give up the lock and then examine it below. We don't want to
1301 * keep holding onto the tasklist_lock while we call getrusage and
1302 * possibly take page faults for user memory.
1303 */
1304 get_task_struct(p);
1305 pid = task_pid_vnr(p);
1306 why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
1307 read_unlock(&tasklist_lock);
1308 sched_annotate_sleep();
1309 if (wo->wo_rusage)
1310 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1311 put_task_struct(p);
1312
1313 if (likely(!(wo->wo_flags & WNOWAIT)))
1314 wo->wo_stat = (exit_code << 8) | 0x7f;
1315
1316 infop = wo->wo_info;
1317 if (infop) {
1318 infop->cause = why;
1319 infop->status = exit_code;
1320 infop->pid = pid;
1321 infop->uid = uid;
1322 }
1323 return pid;
1324 }
1325
1326 /*
1327 * Handle do_wait work for one task in a live, non-stopped state.
1328 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1329 * the lock and this task is uninteresting. If we return nonzero, we have
1330 * released the lock and the system call should return.
1331 */
1332 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1333 {
1334 struct waitid_info *infop;
1335 pid_t pid;
1336 uid_t uid;
1337
1338 if (!unlikely(wo->wo_flags & WCONTINUED))
1339 return 0;
1340
1341 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1342 return 0;
1343
1344 spin_lock_irq(&p->sighand->siglock);
1345 /* Re-check with the lock held. */
1346 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1347 spin_unlock_irq(&p->sighand->siglock);
1348 return 0;
1349 }
1350 if (!unlikely(wo->wo_flags & WNOWAIT))
1351 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1352 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1353 spin_unlock_irq(&p->sighand->siglock);
1354
1355 pid = task_pid_vnr(p);
1356 get_task_struct(p);
1357 read_unlock(&tasklist_lock);
1358 sched_annotate_sleep();
1359 if (wo->wo_rusage)
1360 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1361 put_task_struct(p);
1362
1363 infop = wo->wo_info;
1364 if (!infop) {
1365 wo->wo_stat = 0xffff;
1366 } else {
1367 infop->cause = CLD_CONTINUED;
1368 infop->pid = pid;
1369 infop->uid = uid;
1370 infop->status = SIGCONT;
1371 }
1372 return pid;
1373 }
1374
1375 /*
1376 * Consider @p for a wait by @parent.
1377 *
1378 * -ECHILD should be in ->notask_error before the first call.
1379 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1380 * Returns zero if the search for a child should continue;
1381 * then ->notask_error is 0 if @p is an eligible child,
1382 * or still -ECHILD.
1383 */
1384 static int wait_consider_task(struct wait_opts *wo, int ptrace,
1385 struct task_struct *p)
1386 {
1387 /*
1388 * We can race with wait_task_zombie() from another thread.
1389 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
1390 * can't confuse the checks below.
1391 */
1392 int exit_state = READ_ONCE(p->exit_state);
1393 int ret;
1394
1395 if (unlikely(exit_state == EXIT_DEAD))
1396 return 0;
1397
1398 ret = eligible_child(wo, ptrace, p);
1399 if (!ret)
1400 return ret;
1401
1402 if (unlikely(exit_state == EXIT_TRACE)) {
1403 /*
1404 * ptrace == 0 means we are the natural parent. In this case
1405 * we should clear notask_error, debugger will notify us.
1406 */
1407 if (likely(!ptrace))
1408 wo->notask_error = 0;
1409 return 0;
1410 }
1411
1412 if (likely(!ptrace) && unlikely(p->ptrace)) {
1413 /*
1414 * If it is traced by its real parent's group, just pretend
1415 * the caller is ptrace_do_wait() and reap this child if it
1416 * is zombie.
1417 *
1418 * This also hides group stop state from real parent; otherwise
1419 * a single stop can be reported twice as group and ptrace stop.
1420 * If a ptracer wants to distinguish these two events for its
1421 * own children it should create a separate process which takes
1422 * the role of real parent.
1423 */
1424 if (!ptrace_reparented(p))
1425 ptrace = 1;
1426 }
1427
1428 /* slay zombie? */
1429 if (exit_state == EXIT_ZOMBIE) {
1430 /* we don't reap group leaders with subthreads */
1431 if (!delay_group_leader(p)) {
1432 /*
1433 * A zombie ptracee is only visible to its ptracer.
1434 * Notification and reaping will be cascaded to the
1435 * real parent when the ptracer detaches.
1436 */
1437 if (unlikely(ptrace) || likely(!p->ptrace))
1438 return wait_task_zombie(wo, p);
1439 }
1440
1441 /*
1442 * Allow access to stopped/continued state via zombie by
1443 * falling through. Clearing of notask_error is complex.
1444 *
1445 * When !@ptrace:
1446 *
1447 * If WEXITED is set, notask_error should naturally be
1448 * cleared. If not, subset of WSTOPPED|WCONTINUED is set,
1449 * so, if there are live subthreads, there are events to
1450 * wait for. If all subthreads are dead, it's still safe
1451 * to clear - this function will be called again in a finite
1452 * amount of time once all the subthreads are released and
1453 * will then return without clearing.
1454 *
1455 * When @ptrace:
1456 *
1457 * Stopped state is per-task and thus can't change once the
1458 * target task dies. Only continued and exited can happen.
1459 * Clear notask_error if WCONTINUED | WEXITED.
1460 */
1461 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
1462 wo->notask_error = 0;
1463 } else {
1464 /*
1465 * @p is alive and it's gonna stop, continue or exit, so
1466 * there always is something to wait for.
1467 */
1468 wo->notask_error = 0;
1469 }
1470
1471 /*
1472 * Wait for stopped. Depending on @ptrace, different stopped state
1473 * is used and the two don't interact with each other.
1474 */
1475 ret = wait_task_stopped(wo, ptrace, p);
1476 if (ret)
1477 return ret;
1478
1479 /*
1480 * Wait for continued. There's only one continued state and the
1481 * ptracer can consume it which can confuse the real parent. Don't
1482 * use WCONTINUED from ptracer. You don't need or want it.
1483 */
1484 return wait_task_continued(wo, p);
1485 }
1486
1487 /*
1488 * Do the work of do_wait() for one thread in the group, @tsk.
1489 *
1490 * -ECHILD should be in ->notask_error before the first call.
1491 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1492 * Returns zero if the search for a child should continue; then
1493 * ->notask_error is 0 if there were any eligible children,
1494 * or still -ECHILD.
1495 */
1496 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
1497 {
1498 struct task_struct *p;
1499
1500 list_for_each_entry(p, &tsk->children, sibling) {
1501 int ret = wait_consider_task(wo, 0, p);
1502
1503 if (ret)
1504 return ret;
1505 }
1506
1507 return 0;
1508 }
1509
1510 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1511 {
1512 struct task_struct *p;
1513
1514 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
1515 int ret = wait_consider_task(wo, 1, p);
1516
1517 if (ret)
1518 return ret;
1519 }
1520
1521 return 0;
1522 }
1523
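/*
 * Wake-function for the wait_chldexit queue: only wake the sleeper in
 * do_wait() when the child that changed state is one it may be waiting
 * for.
 */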
1524 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
1525 int sync, void *key)
1526 {
1527 struct wait_opts *wo = container_of(wait, struct wait_opts,
1528 child_wait);
1529 struct task_struct *p = key;
1530
1531 if (!eligible_pid(wo, p))
1532 return 0;
1533
1534 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
1535 return 0;
1536
1537 return default_wake_function(wait, mode, sync, key);
1538 }
1539
1540 void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1541 {
1542 __wake_up_sync_key(&parent->signal->wait_chldexit,
1543 TASK_INTERRUPTIBLE, p);
1544 }
1545
1546 static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
1547 struct task_struct *target)
1548 {
1549 struct task_struct *parent =
1550 !ptrace ? target->real_parent : target->parent;
1551
1552 return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
1553 same_thread_group(current, parent));
1554 }
1555
1556 /*
1557 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
1558 * and tracee lists to find the target task.
1559 */
1560 static int do_wait_pid(struct wait_opts *wo)
1561 {
1562 bool ptrace;
1563 struct task_struct *target;
1564 int retval;
1565
1566 ptrace = false;
1567 target = pid_task(wo->wo_pid, PIDTYPE_TGID);
1568 if (target && is_effectively_child(wo, ptrace, target)) {
1569 retval = wait_consider_task(wo, ptrace, target);
1570 if (retval)
1571 return retval;
1572 }
1573
1574 ptrace = true;
1575 target = pid_task(wo->wo_pid, PIDTYPE_PID);
1576 if (target && target->ptrace &&
1577 is_effectively_child(wo, ptrace, target)) {
1578 retval = wait_consider_task(wo, ptrace, target);
1579 if (retval)
1580 return retval;
1581 }
1582
1583 return 0;
1584 }
1585
1586 static long do_wait(struct wait_opts *wo)
1587 {
1588 int retval;
1589
1590 trace_sched_process_wait(wo->wo_pid);
1591
1592 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1593 wo->child_wait.private = current;
1594 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1595 repeat:
1596 /*
1597 * If there is nothing that can match our criteria, just get out.
1598 * We will clear ->notask_error to zero if we see any child that
1599 * might later match our criteria, even if we are not able to reap
1600 * it yet.
1601 */
1602 wo->notask_error = -ECHILD;
1603 if ((wo->wo_type < PIDTYPE_MAX) &&
1604 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
1605 goto notask;
1606
1607 set_current_state(TASK_INTERRUPTIBLE);
1608 read_lock(&tasklist_lock);
1609
1610 if (wo->wo_type == PIDTYPE_PID) {
1611 retval = do_wait_pid(wo);
1612 if (retval)
1613 goto end;
1614 } else {
1615 struct task_struct *tsk = current;
1616
1617 do {
1618 retval = do_wait_thread(wo, tsk);
1619 if (retval)
1620 goto end;
1621
1622 retval = ptrace_do_wait(wo, tsk);
1623 if (retval)
1624 goto end;
1625
1626 if (wo->wo_flags & __WNOTHREAD)
1627 break;
1628 } while_each_thread(current, tsk);
1629 }
1630 read_unlock(&tasklist_lock);
1631
1632 notask:
1633 retval = wo->notask_error;
1634 if (!retval && !(wo->wo_flags & WNOHANG)) {
1635 retval = -ERESTARTSYS;
1636 if (!signal_pending(current)) {
1637 schedule();
1638 goto repeat;
1639 }
1640 }
1641 end:
1642 __set_current_state(TASK_RUNNING);
1643 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1644 return retval;
1645 }
1646
1647 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1648 int options, struct rusage *ru)
1649 {
1650 struct wait_opts wo;
1651 struct pid *pid = NULL;
1652 enum pid_type type;
1653 long ret;
1654 unsigned int f_flags = 0;
1655
1656 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1657 __WNOTHREAD|__WCLONE|__WALL))
1658 return -EINVAL;
1659 if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1660 return -EINVAL;
1661
1662 switch (which) {
1663 case P_ALL:
1664 type = PIDTYPE_MAX;
1665 break;
1666 case P_PID:
1667 type = PIDTYPE_PID;
1668 if (upid <= 0)
1669 return -EINVAL;
1670
1671 pid = find_get_pid(upid);
1672 break;
1673 case P_PGID:
1674 type = PIDTYPE_PGID;
1675 if (upid < 0)
1676 return -EINVAL;
1677
1678 if (upid)
1679 pid = find_get_pid(upid);
1680 else
1681 pid = get_task_pid(current, PIDTYPE_PGID);
1682 break;
1683 case P_PIDFD:
1684 type = PIDTYPE_PID;
1685 if (upid < 0)
1686 return -EINVAL;
1687
1688 pid = pidfd_get_pid(upid, &f_flags);
1689 if (IS_ERR(pid))
1690 return PTR_ERR(pid);
1691
1692 break;
1693 default:
1694 return -EINVAL;
1695 }
1696
1697 wo.wo_type = type;
1698 wo.wo_pid = pid;
1699 wo.wo_flags = options;
1700 wo.wo_info = infop;
1701 wo.wo_rusage = ru;
1702 if (f_flags & O_NONBLOCK)
1703 wo.wo_flags |= WNOHANG;
1704
1705 ret = do_wait(&wo);
1706 if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
1707 ret = -EAGAIN;
1708
1709 put_pid(pid);
1710 return ret;
1711 }
1712
1713 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1714 infop, int, options, struct rusage __user *, ru)
1715 {
1716 struct rusage r;
1717 struct waitid_info info = {.status = 0};
1718 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1719 int signo = 0;
1720
1721 if (err > 0) {
1722 signo = SIGCHLD;
1723 err = 0;
1724 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1725 return -EFAULT;
1726 }
1727 if (!infop)
1728 return err;
1729
1730 if (!user_write_access_begin(infop, sizeof(*infop)))
1731 return -EFAULT;
1732
1733 unsafe_put_user(signo, &infop->si_signo, Efault);
1734 unsafe_put_user(0, &infop->si_errno, Efault);
1735 unsafe_put_user(info.cause, &infop->si_code, Efault);
1736 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1737 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1738 unsafe_put_user(info.status, &infop->si_status, Efault);
1739 user_write_access_end();
1740 return err;
1741 Efault:
1742 user_write_access_end();
1743 return -EFAULT;
1744 }
1745
1746 long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1747 struct rusage *ru)
1748 {
1749 struct wait_opts wo;
1750 struct pid *pid = NULL;
1751 enum pid_type type;
1752 long ret;
1753
1754 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1755 __WNOTHREAD|__WCLONE|__WALL))
1756 return -EINVAL;
1757
1758 /* -INT_MIN is not defined */
1759 if (upid == INT_MIN)
1760 return -ESRCH;
1761
1762 if (upid == -1)
1763 type = PIDTYPE_MAX;
1764 else if (upid < 0) {
1765 type = PIDTYPE_PGID;
1766 pid = find_get_pid(-upid);
1767 } else if (upid == 0) {
1768 type = PIDTYPE_PGID;
1769 pid = get_task_pid(current, PIDTYPE_PGID);
1770 } else /* upid > 0 */ {
1771 type = PIDTYPE_PID;
1772 pid = find_get_pid(upid);
1773 }
1774
1775 wo.wo_type = type;
1776 wo.wo_pid = pid;
1777 wo.wo_flags = options | WEXITED;
1778 wo.wo_info = NULL;
1779 wo.wo_stat = 0;
1780 wo.wo_rusage = ru;
1781 ret = do_wait(&wo);
1782 put_pid(pid);
1783 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1784 ret = -EFAULT;
1785
1786 return ret;
1787 }
1788
1789 int kernel_wait(pid_t pid, int *stat)
1790 {
1791 struct wait_opts wo = {
1792 .wo_type = PIDTYPE_PID,
1793 .wo_pid = find_get_pid(pid),
1794 .wo_flags = WEXITED,
1795 };
1796 int ret;
1797
1798 ret = do_wait(&wo);
1799 if (ret > 0 && wo.wo_stat)
1800 *stat = wo.wo_stat;
1801 put_pid(wo.wo_pid);
1802 return ret;
1803 }
1804
1805 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1806 int, options, struct rusage __user *, ru)
1807 {
1808 struct rusage r;
1809 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1810
1811 if (err > 0) {
1812 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1813 return -EFAULT;
1814 }
1815 return err;
1816 }
1817
1818 #ifdef __ARCH_WANT_SYS_WAITPID
1819
1820 /*
1821 * sys_waitpid() remains for compatibility. waitpid() should be
1822 * implemented by calling sys_wait4() from libc.a.
1823 */
1824 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1825 {
1826 return kernel_wait4(pid, stat_addr, options, NULL);
1827 }
1828
1829 #endif
1830
1831 #ifdef CONFIG_COMPAT
1832 COMPAT_SYSCALL_DEFINE4(wait4,
1833 compat_pid_t, pid,
1834 compat_uint_t __user *, stat_addr,
1835 int, options,
1836 struct compat_rusage __user *, ru)
1837 {
1838 struct rusage r;
1839 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1840 if (err > 0) {
1841 if (ru && put_compat_rusage(&r, ru))
1842 return -EFAULT;
1843 }
1844 return err;
1845 }
1846
1847 COMPAT_SYSCALL_DEFINE5(waitid,
1848 int, which, compat_pid_t, pid,
1849 struct compat_siginfo __user *, infop, int, options,
1850 struct compat_rusage __user *, uru)
1851 {
1852 struct rusage ru;
1853 struct waitid_info info = {.status = 0};
1854 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
1855 int signo = 0;
1856 if (err > 0) {
1857 signo = SIGCHLD;
1858 err = 0;
1859 if (uru) {
1860 /* kernel_waitid() overwrites everything in ru */
1861 if (COMPAT_USE_64BIT_TIME)
1862 err = copy_to_user(uru, &ru, sizeof(ru));
1863 else
1864 err = put_compat_rusage(&ru, uru);
1865 if (err)
1866 return -EFAULT;
1867 }
1868 }
1869
1870 if (!infop)
1871 return err;
1872
1873 if (!user_write_access_begin(infop, sizeof(*infop)))
1874 return -EFAULT;
1875
1876 unsafe_put_user(signo, &infop->si_signo, Efault);
1877 unsafe_put_user(0, &infop->si_errno, Efault);
1878 unsafe_put_user(info.cause, &infop->si_code, Efault);
1879 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1880 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1881 unsafe_put_user(info.status, &infop->si_status, Efault);
1882 user_write_access_end();
1883 return err;
1884 Efault:
1885 user_write_access_end();
1886 return -EFAULT;
1887 }
1888 #endif
1889
1890 /**
1891 * thread_group_exited - check that a thread group has exited
1892 * @pid: tgid of thread group to be checked.
1893 *
1894 * Test if the thread group represented by tgid has exited (all
1895 * threads are zombies, dead or completely gone).
1896 *
1897 * Return: true if the thread group has exited. false otherwise.
1898 */
1899 bool thread_group_exited(struct pid *pid)
1900 {
1901 struct task_struct *task;
1902 bool exited;
1903
1904 rcu_read_lock();
1905 task = pid_task(pid, PIDTYPE_PID);
1906 exited = !task ||
1907 (READ_ONCE(task->exit_state) && thread_group_empty(task));
1908 rcu_read_unlock();
1909
1910 return exited;
1911 }
1912 EXPORT_SYMBOL(thread_group_exited);
1913
1914 __weak void abort(void)
1915 {
1916 BUG();
1917
1918 /* if that doesn't kill us, halt */
1919 panic("Oops failed to kill thread");
1920 }
1921 EXPORT_SYMBOL(abort);
1922