/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"
#ifdef CONFIG_QEMU_TRACE
void qemu_trace_thread_name(char *name);
#endif

#include <trace/events/sched.h>

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	BUG_ON(!fmt);
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
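
/*
 * Illustrative sketch (not part of the original file): how a minimal
 * binary-format handler module might register itself against the
 * formats list above.  The names example_format and
 * example_load_binary are hypothetical; register_binfmt() is assumed
 * to be the usual <linux/binfmts.h> wrapper around
 * __register_binfmt(fmt, 0).
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		register_binfmt(&example_format);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 */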

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load at is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}
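
/*
 * Illustrative note (not in the original source): this accounting is
 * driven from two places in this file.  get_arg_page() charges the
 * current size of the argument stack via
 * acct_arg_size(bprm, size / PAGE_SIZE) while copy_strings() faults
 * pages in, and both flush_old_exec() and the do_execve_common()
 * error path reset the counter with acct_arg_size(bprm, 0) before the
 * new mm is installed or dropped.
 */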

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
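		/*
		 * Worked example (illustrative, not in the original
		 * source): with the common RLIMIT_STACK soft limit of
		 * 8 MiB, the check below caps argv+env at 2 MiB, while
		 * the ARG_MAX test above always admits the first
		 * 32 * 4 KiB = 128 KiB regardless of the rlimit.
		 */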
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
	if (err)
		goto err;

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i++ >= max)
				return -E2BIG;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
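
/*
 * Illustrative example (not in the original source): for argv =
 * {"ls", "-l"} the loop above counts argc down, so "-l" (including
 * its trailing NUL) is copied first, ending exactly at the old
 * bprm->p, and "ls" is copied immediately below it.  bprm->p is left
 * pointing at the 'l' of "ls", with the strings sitting at the top of
 * the new stack in their original order.
 */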

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start): some architectures
		 * have constraints on va-space that make this illegal (IA64);
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
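
/*
 * Worked example (illustrative, not in the original source): suppose
 * the temporary stack vma is [0x7ffffffe0000, 0x800000000000) and the
 * binfmt code asks for shift = 0x10000.  Step 2 grows the vma to
 * [0x7ffffffd0000, 0x800000000000), step 3 moves the page tables down
 * by 0x10000, and since the old and new ranges overlap
 * (new_end > old_start) step 4 frees the page-table range from
 * new_end = 0x7fffffff0000 up to old_end, before step 5 shrinks the
 * vma to [0x7ffffffd0000, 0x7fffffff0000).
 */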

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID,  task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[j];
		if (!set)
			continue;
		fdt->close_on_exec[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	trace_task_rename(tsk, buf);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
#ifdef CONFIG_QEMU_TRACE
	qemu_trace_thread_name(buf);
#endif
}

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after the last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < len - 1)
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}
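
/*
 * Example (illustrative, not in the original source):
 * filename_to_taskname(tcomm, "/usr/bin/ls", TASK_COMM_LEN) leaves
 * "ls" in tcomm; a name with no slash is copied as-is, and anything
 * longer than len - 1 bytes is silently truncated.
 */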

int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &=
		~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	set_task_comm(current, bprm->tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}
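
/*
 * Lifecycle sketch (illustrative, not in the original source): a
 * successful exec takes cred_guard_mutex in prepare_bprm_creds() and
 * releases it in install_exec_creds() after commit_creds(), which
 * also clears bprm->cred.  On any earlier failure bprm->cred is still
 * set, so free_bprm() both unlocks the mutex and calls abort_creds().
 * Exactly one of the two paths runs for every prepare_bprm_creds()
 * that returned 0.
 */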

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm);
}

int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above
	 */
	if (get_dumpable(current->mm) != SUID_DUMP_USER)
		perf_event_exit_task(current);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
	    !task_no_new_privs(current)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
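
/*
 * Illustrative example (not in the original source): after
 * prepare_binprm() returns, bprm->buf holds the first 128 bytes of
 * the image, which is what the format handlers key off - e.g.
 * "\177ELF..." for an ELF binary, or "#!/bin/sh\n" for a script.
 */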

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
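
/*
 * Illustrative example (not in the original source): with the stack
 * holding "script.sh\0-v\0" and argc == 2, a call advances bprm->p
 * past "script.sh\0" so it points at "-v" and leaves argc == 1.  A
 * script handler such as binfmt_script would typically do this before
 * copying the interpreter path in as the new argv[0].
 */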

/*
 * cycle through the list of binary format handlers until one of them
 * recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;
	pid_t old_pid, old_vpid;

	/* This allows 4 levels of binfmt rewrites before failing hard. */
	if (depth > 5)
		return -ELOOP;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			bprm->recursion_depth = depth + 1;
			retval = fn(bprm, regs);
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0) {
					trace_sched_process_exec(current, old_pid, bprm);
					ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
				}
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
#ifdef CONFIG_MODULES
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			if (try)
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
		}
#else
		break;
#endif
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
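
/*
 * Illustrative walk-through (not in the original source): for an ELF
 * binary, binfmt_elf's load_binary recognizes the "\177ELF" magic in
 * bprm->buf and loads the image directly.  For "#!/bin/sh", a script
 * handler rewrites bprm to point at /bin/sh and calls back into
 * search_binary_handler(), which is why recursion_depth is bumped
 * around each load_binary call.  If every handler returns -ENOEXEC
 * and the first four bytes are not printable text, the second pass is
 * preceded by request_module("binfmt-%04x", ...) keyed on bytes 2-3
 * of the image, in the hope that a matching handler module exists.
 */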

#ifdef CONFIG_QEMU_TRACE
extern void qemu_trace_execve(int argc, const char __user *const __user * argv);
#endif

/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(const char *filename,
				struct user_arg_ptr argv,
				struct user_arg_ptr envp,
				struct pt_regs *regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	bool clear_in_exec;
	int retval;
	const struct cred *cred = current_cred();

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	retval = check_unsafe_exec(bprm);
	if (retval < 0)
		goto out_free;
	clear_in_exec = retval;
	current->in_execve = 1;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

#ifdef CONFIG_QEMU_TRACE
	qemu_trace_execve(bprm->argc, argv.ptr.native);
#endif

	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unmark:
	if (clear_in_exec)
		current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}

int do_execve(const char *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execve_common(filename, argv, envp, regs);
}

#ifdef CONFIG_COMPAT
int compat_do_execve(char *filename,
	compat_uptr_t __user *__argv,
	compat_uptr_t __user *__envp,
	struct pt_regs *regs)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execve_common(filename, argv, envp, regs);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}

EXPORT_SYMBOL(set_binfmt);

static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}

static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		goto expand_fail;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;

expand_fail:
	return ret;
}

static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", signr);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
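
/*
 * Illustrative examples (not in the original source): with
 * core_pattern = "core.%e.%p", a crash of "myprog" with tgid 1234
 * yields "core.myprog.1234"; with the default pattern "core" and
 * core_uses_pid set, the fallback above appends the pid to give
 * "core.1234".  A leading '|', as in "|/usr/bin/helper %p", makes
 * format_corename() return ispipe != 0, telling the caller to pipe
 * the dump to the helper instead of writing a file.
 */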
1863 
zap_process(struct task_struct * start,int exit_code)1864 static int zap_process(struct task_struct *start, int exit_code)
1865 {
1866 	struct task_struct *t;
1867 	int nr = 0;
1868 
1869 	start->signal->flags = SIGNAL_GROUP_EXIT;
1870 	start->signal->group_exit_code = exit_code;
1871 	start->signal->group_stop_count = 0;
1872 
1873 	t = start;
1874 	do {
1875 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1876 		if (t != current && t->mm) {
1877 			sigaddset(&t->pending.signal, SIGKILL);
1878 			signal_wake_up(t, 1);
1879 			nr++;
1880 		}
1881 	} while_each_thread(start, t);
1882 
1883 	return nr;
1884 }
1885 
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take the tasklist
	 * lock, but this is safe with respect to:
	 *
	 * fork:
	 *	No sub-thread can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads(), because copy_process() adds the new
	 *	process to the tail of the init_task.tasks list, and the
	 *	lock/unlock of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks);
	 *	we will see either the old or the new leader, and it does not
	 *	matter which. However, it can change p->sighand, so
	 *	lock_task_sighand(p) must be used. Since p->mm != NULL and we
	 *	hold ->mmap_sem, it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL,
	 *	already unhashed and thus removed from ->thread_group.
	 *	This is OK: __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, so we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

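/*
 * A note on the count above: nr starts at -EAGAIN and only becomes a
 * thread count once zap_process() has run, so a negative return means
 * another group exit beat us to it.  The mm_users == nr + 1 fast path
 * covers the common case where only this thread group uses the mm;
 * the "+ 1" accounts for the dumping thread's own reference.
 */
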
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0)
		wait_for_completion(&core_state->startup);

	return core_waiters;
}

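/*
 * The completion above pairs with exit_mm(): each thread zapped by
 * zap_threads() reaches exit_mm(), sees mm->core_state, links its
 * on-stack core_thread into core_state->dumper and decrements
 * core_state->nr_threads; the last one to arrive completes
 * core_state->startup, so when coredump_wait() returns, this is the
 * only thread still running with the mm.
 */
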
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * See exit_mm(): the waiting task must not observe
		 * ->task == NULL before we have read ->next, because
		 * curr lives on that task's stack.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts the traditional three-value dumpable setting to
 * two flags and stores them in mm->flags.  It modifies the lower two
 * bits of mm->flags, but these bits are not changed atomically, so
 * get_dumpable can observe an intermediate state.  To avoid unexpected
 * behaviour, get_dumpable returns either the old value or the new one,
 * which is guaranteed by the order in which the bits are modified:
 *
 * dumpable |   mm->flags (binary)
 * old  new | initial interim  final
 * ---------+-----------------------
 *  0    1  |   00      01      01
 *  0    2  |   00      10(*)   11
 *  1    0  |   01      00      00
 *  1    2  |   01      11      11
 *  2    0  |   11      10(*)   00
 *  2    1  |   11      11      01
 *
 * (*) get_dumpable regards the interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

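/*
 * A sketch of how these values are reached in practice: exec of a
 * set-id or otherwise credential-changing binary feeds the
 * suid_dumpable sysctl value (0, 1 or 2) into set_dumpable(), and
 * prctl(PR_SET_DUMPABLE) stores 0 or 1 the same way; the exact call
 * sites vary by kernel version.
 */
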
static int __get_dumpable(unsigned long mm_flags)
{
	int ret;

	ret = mm_flags & MMF_DUMPABLE_MASK;
	return (ret >= 2) ? 2 : ret;
}

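/*
 * The clamp to 2 above is what makes the interim "10" states marked
 * (*) in the table read back as suid-safe: bit patterns 10 and 11 both
 * produce a masked value >= 2.
 */
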
int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

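/*
 * wait_for_dump_helpers() poses as an extra reader on the pipe (and
 * gives up its writer slot) so it can sleep until the usermode helper,
 * the real reader, has consumed the dump and closed its end; only then
 * does pipe->readers drop back to 1.  A pending signal breaks the loop
 * so the crashing task cannot be stuck here forever.
 */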

/*
 * umh_pipe_setup
 * Helper function to customize the process used to collect the core in
 * userspace.  Specifically, it sets up a pipe and installs it as fd 0
 * (stdin) for that process.  Returns 0 on success, or PTR_ERR() on
 * failure.
 * Note that it also sets the core limit to 1; this special value is
 * used to trap recursive core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *rp, *wp;
	struct fdtable *fdt;
	struct coredump_params *cp = (struct coredump_params *)info->data;
	struct files_struct *cf = current->files;

	wp = create_write_pipe(0);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	rp = create_read_pipe(wp, 0);
	if (IS_ERR(rp)) {
		free_write_pipe(wp);
		return PTR_ERR(rp);
	}

	cp->file = wp;

	sys_close(0);
	fd_install(0, rp);
	spin_lock(&cf->file_lock);
	fdt = files_fdtable(cf);
	__set_open_fd(0, fdt);
	__clear_close_on_exec(0, fdt);
	spin_unlock(&cf->file_lock);

	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}

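/*
 * For example, a core_pattern of "|/usr/local/bin/core-catcher %p %e"
 * (helper path hypothetical) makes the kernel spawn that program with
 * the expanded arguments, with fd 0 attached to the read end of the
 * pipe created above, so the helper simply reads the core image from
 * stdin.
 */
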
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this field is not
		 * protected by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process, nor do we know its entire history. We only know it
	 * was tainted, so we dump it as root in mode 2.
	 */
	if (__get_dumpable(cprm.mm_flags) == 2) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(&cn, signr);

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_corename;
		}

		if (cprm.limit == 1) {
			/*
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use a
			 * cprm.limit of 1 here as a special value. Any
			 * non-1 limit gets set to RLIM_INFINITY below, but
			 * a limit of 0 skips the dump.  This is a consistent
			 * way to catch recursive crashes.  We can still crash
			 * if the core_pattern binary sets RLIMIT_CORE to a
			 * value other than 1, but then it runs as root and
			 * can do lots of stupid things anyway.
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the thread group leader.  That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

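		/*
		 * The bare "2" in the flags below is the historical
		 * spelling of O_RDWR; it is kept here as a literal.
		 */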
		cprm.file = filp_open(cn.corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually I see no reason not to allow this for named
		 * pipes etc., but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others into
		 * dumping core into their pre-created files.
		 */
		if (inode->i_uid != current_fsuid())
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

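/*
 * Note the unwind order of the labels above: each fail_* label undoes
 * only the state that had already been set up when the corresponding
 * failure point was reached, so failures that occur later in the
 * function jump to labels nearer the top of the chain and fall through
 * the rest of the cleanup.
 */
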
/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);
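
/*
 * A usage sketch for the two helpers above (names hypothetical): a
 * binfmt core dumper writes a header with dump_write(file, &hdr,
 * sizeof(hdr)) and skips an unmapped region with dump_seek(file, len).
 * On a seekable file dump_seek() leaves a hole, so sparse cores stay
 * sparse; on a pipe it falls back to writing zero-filled pages.
 */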