// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/elf.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
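
/*
 * Illustrative core_pattern values (examples only; the helper path below
 * is hypothetical):
 *   "core.%e.%p"                  -> e.g. "core.myprog.1234"
 *   "|/usr/bin/core-helper %p %s" -> pipe the dump to a userspace helper,
 *                                    passing the pid and signal as argv
 */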

struct core_name {
	char *corename;
	int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

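	/*
	 * krealloc() may round the allocation up; record the usable size
	 * that ksize() reports so later appends can use the slack.
	 */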
	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

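	/*
	 * vsnprintf() returns the length the output would have had, so
	 * need >= free means the string was truncated and the buffer
	 * must be grown before retrying.
	 */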
	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
				(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
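		/*
		 * Every helper argument needs at least one character plus a
		 * separator, so the pattern can produce at most
		 * sizeof(core_pattern)/2 argument offsets.
		 */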
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
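	/* 0: corename is an output path, 1: corename is a helper command line */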
	return ispipe;
}

static int zap_process(struct task_struct *start, int exit_code, int flags)
{
	struct task_struct *t;
	int nr = 0;

	/* ignore all signals except SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_task = tsk;
		nr = zap_process(tsk, exit_code, 0);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_lock. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_lock
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;

		for_each_thread(g, p) {
			if (unlikely(!p->mm))
				continue;
			if (unlikely(p->mm == mm)) {
				lock_task_sighand(p, &flags);
				nr += zap_process(p, exit_code,
							SIGNAL_GROUP_EXIT);
				unlock_task_sighand(p, &flags);
			}
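			/*
			 * All live threads in a thread group share the same
			 * ->mm, so checking the first thread that still has
			 * one is enough for this process.
			 */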
			break;
		}
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	mmap_write_unlock(mm);

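	/*
	 * core_waiters is the number of other tasks sharing this mm; the
	 * last of them to reach exit_mm() completes ->startup.
	 */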
	if (core_waiters > 0) {
		struct core_thread *ptr;

		freezer_do_not_count();
		wait_for_completion(&core_state->startup);
		freezer_count();
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

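	/*
	 * Pose as an extra reader and give up our writer reference, so the
	 * helper sees EOF once it drains the pipe; we are done when we are
	 * the only reader left, i.e. the helper has closed its end.
	 */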
	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or
 * a negative errno on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	struct files_struct *displaced;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;
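		/*
		 * O_NOFOLLOW refuses a symlink at the final path component,
		 * and O_EXCL (together with the unlink below) makes sure we
		 * never write through a pre-existing file.
		 */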

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(root.dentry, root.mnt,
				cn.corename, open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
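		/*
		 * Refuse to dump through a corefile with extra hard links
		 * (it could alias a file we must not overwrite) or through
		 * a dentry that was unlinked while we were opening it.
		 */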
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if ((inode->i_mode & 0677) != 0600)
			goto close_fail;
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
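/*
 * Note the inverted convention: these helpers return 1 on success and
 * 0 on failure, so callers write e.g. "if (!dump_emit(...))" to bail out.
 */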
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;
	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}
EXPORT_SYMBOL(dump_emit);

int dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;
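
	/*
	 * On a seekable file we can leave a sparse hole via llseek();
	 * pipes and other non-seekable targets get explicit zero pages.
	 */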
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return dump_emit(cprm, zeroes, nr);
	}
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;
		int stop;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			void *kaddr = kmap(page);

			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
			kunmap(page);
			put_user_page(page);
		} else {
			stop = !dump_skip(cprm, PAGE_SIZE);
		}
		if (stop)
			return 0;
	}
	return 1;
}
#endif

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = cprm->pos & (align - 1);
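	/* reject alignments that are not a power of two */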
	if (align & (align - 1))
		return 0;
	return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);

/*
 * Ensures that file size is big enough to contain the current file
 * position. This prevents gdb from complaining about a truncated file
 * if the last "write" to the file was dump_skip.
 */
void dump_truncate(struct coredump_params *cprm)
{
	struct file *file = cprm->file;
	loff_t offset;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		offset = file->f_op->llseek(file, 0, SEEK_CUR);
		if (i_size_read(file->f_mapping->host) < offset)
			do_truncate(file->f_path.dentry, offset, 0, file);
	}
}
EXPORT_SYMBOL(dump_truncate);

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include the vDSO, vsyscall, and other
 * architecture-specific mappings.
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))
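	/*
	 * FILTER(type) tests an MMF_DUMP_* bit; these bits are controlled
	 * from userspace via /proc/<pid>/coredump_filter.
	 */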

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
			return PAGE_SIZE;

		/*
		 * ELF libraries aren't always executable.
		 * We'll want to check whether the mapping starts with the ELF
		 * magic, but not now - we're holding the mmap lock,
		 * so copy_from_user() doesn't work here.
		 * Use a placeholder instead, and fix it up later in
		 * dump_vma_snapshot().
		 */
		return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
		      struct core_vma_metadata **vma_meta,
		      size_t *vma_data_size_ptr)
{
	struct vm_area_struct *vma, *gate_vma;
	struct mm_struct *mm = current->mm;
	int i;
	size_t vma_data_size = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	gate_vma = get_gate_vma(mm);
	*vma_count = mm->map_count + (gate_vma ? 1 : 0);

	*vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
	if (!*vma_meta) {
		mmap_write_unlock(mm);
		return -ENOMEM;
	}

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma), i++) {
		struct core_vma_metadata *m = (*vma_meta) + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
	}

	mmap_write_unlock(mm);

	if (WARN_ON(i != *vma_count)) {
		kvfree(*vma_meta);
		return -EFAULT;
	}

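	/*
	 * The mmap lock is dropped now, so copy_from_user() is safe again:
	 * resolve the ELF-header placeholders by inspecting each mapping.
	 */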
	for (i = 0; i < *vma_count; i++) {
		struct core_vma_metadata *m = (*vma_meta) + i;

		if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
			char elfmag[SELFMAG];

			if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
					memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
				m->dump_size = 0;
			} else {
				m->dump_size = PAGE_SIZE;
			}
		}

		vma_data_size += m->dump_size;
	}

	*vma_data_size_ptr = vma_data_size;
	return 0;
}