1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/fs/binfmt_elf.c
4  *
5  * These are the functions used to load ELF format executables as used
6  * on SVr4 machines.  Information on the format may be found in the book
7  * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
8  * Tools".
9  *
10  * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
11  */
12 
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/fs.h>
16 #include <linux/log2.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/errno.h>
20 #include <linux/signal.h>
21 #include <linux/binfmts.h>
22 #include <linux/string.h>
23 #include <linux/file.h>
24 #include <linux/slab.h>
25 #include <linux/personality.h>
26 #include <linux/elfcore.h>
27 #include <linux/init.h>
28 #include <linux/highuid.h>
29 #include <linux/compiler.h>
30 #include <linux/highmem.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vmalloc.h>
34 #include <linux/security.h>
35 #include <linux/random.h>
36 #include <linux/elf.h>
37 #include <linux/elf-randomize.h>
38 #include <linux/utsname.h>
39 #include <linux/coredump.h>
40 #include <linux/sched.h>
41 #include <linux/sched/coredump.h>
42 #include <linux/sched/task_stack.h>
43 #include <linux/sched/cputime.h>
44 #include <linux/sizes.h>
45 #include <linux/types.h>
46 #include <linux/cred.h>
47 #include <linux/dax.h>
48 #include <linux/uaccess.h>
49 #include <asm/param.h>
50 #include <asm/page.h>
51 
52 #ifndef ELF_COMPAT
53 #define ELF_COMPAT 0
54 #endif
55 
56 #ifndef user_long_t
57 #define user_long_t long
58 #endif
59 #ifndef user_siginfo_t
60 #define user_siginfo_t siginfo_t
61 #endif
62 
63 /* That's for binfmt_elf_fdpic to deal with */
64 #ifndef elf_check_fdpic
65 #define elf_check_fdpic(ex) false
66 #endif
67 
68 static int load_elf_binary(struct linux_binprm *bprm);
69 
70 #ifdef CONFIG_USELIB
71 static int load_elf_library(struct file *);
72 #else
73 #define load_elf_library NULL
74 #endif
75 
76 /*
77  * If we don't support core dumping, then supply a NULL so we
78  * don't even try.
79  */
80 #ifdef CONFIG_ELF_CORE
81 static int elf_core_dump(struct coredump_params *cprm);
82 #else
83 #define elf_core_dump	NULL
84 #endif
85 
86 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
87 #define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
88 #else
89 #define ELF_MIN_ALIGN	PAGE_SIZE
90 #endif
91 
92 #ifndef ELF_CORE_EFLAGS
93 #define ELF_CORE_EFLAGS	0
94 #endif
95 
96 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
97 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
98 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
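/*
 * Worked example (assuming ELF_MIN_ALIGN == 0x1000, i.e. 4 KiB pages):
 *   ELF_PAGESTART(0x12345)  == 0x12000  (round down to the page start)
 *   ELF_PAGEOFFSET(0x12345) == 0x345    (offset within that page)
 *   ELF_PAGEALIGN(0x12345)  == 0x13000  (round up to the next page)
 */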
99 
100 static struct linux_binfmt elf_format = {
101 	.module		= THIS_MODULE,
102 	.load_binary	= load_elf_binary,
103 	.load_shlib	= load_elf_library,
104 	.core_dump	= elf_core_dump,
105 	.min_coredump	= ELF_EXEC_PAGESIZE,
106 };
107 
108 #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
109 
110 static int set_brk(unsigned long start, unsigned long end, int prot)
111 {
112 	start = ELF_PAGEALIGN(start);
113 	end = ELF_PAGEALIGN(end);
114 	if (end > start) {
115 		/*
116 		 * Map the last of the bss segment.
117 		 * If the header is requesting these pages to be
118 		 * executable, honour that (ppc32 needs this).
119 		 */
120 		int error = vm_brk_flags(start, end - start,
121 				prot & PROT_EXEC ? VM_EXEC : 0);
122 		if (error)
123 			return error;
124 	}
125 	current->mm->start_brk = current->mm->brk = end;
126 	return 0;
127 }
128 
129 /* We need to explicitly zero any fractional pages
130    after the data section (i.e. the bss).  This area would
131    otherwise contain junk from the file that should not
132    be in memory.
133  */
134 static int padzero(unsigned long elf_bss)
135 {
136 	unsigned long nbyte;
137 
138 	nbyte = ELF_PAGEOFFSET(elf_bss);
139 	if (nbyte) {
140 		nbyte = ELF_MIN_ALIGN - nbyte;
141 		if (clear_user((void __user *) elf_bss, nbyte))
142 			return -EFAULT;
143 	}
144 	return 0;
145 }
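/*
 * Example: assuming ELF_MIN_ALIGN == 0x1000, padzero(0x404010) clears the
 * 0xff0 bytes from 0x404010 up to the page boundary at 0x405000, so the
 * tail of the last file-backed page reads as zeroes rather than file junk.
 */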
146 
147 /* Let's use some macros to make this stack manipulation a little clearer */
148 #ifdef CONFIG_STACK_GROWSUP
149 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
150 #define STACK_ROUND(sp, items) \
151 	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
152 #define STACK_ALLOC(sp, len) ({ \
153 	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
154 	old_sp; })
155 #else
156 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
157 #define STACK_ROUND(sp, items) \
158 	(((unsigned long) (sp - items)) &~ 15UL)
159 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
160 #endif
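/*
 * Illustration for the common downward-growing case (the #else branch above):
 * STACK_ALLOC(sp, 16) moves sp down by 16 bytes and evaluates to the new,
 * lower address, while STACK_ROUND(sp, items) takes the address that sits
 * 'items' elf_addr_t slots below sp and rounds it down to a 16-byte
 * boundary, keeping the initial stack pointer 16-byte aligned.
 */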
161 
162 #ifndef ELF_BASE_PLATFORM
163 /*
164  * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
165  * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
166  * will be copied to the user stack in the same manner as AT_PLATFORM.
167  */
168 #define ELF_BASE_PLATFORM NULL
169 #endif
170 
171 static int
172 create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
173 		unsigned long interp_load_addr,
174 		unsigned long e_entry, unsigned long phdr_addr)
175 {
176 	struct mm_struct *mm = current->mm;
177 	unsigned long p = bprm->p;
178 	int argc = bprm->argc;
179 	int envc = bprm->envc;
180 	elf_addr_t __user *sp;
181 	elf_addr_t __user *u_platform;
182 	elf_addr_t __user *u_base_platform;
183 	elf_addr_t __user *u_rand_bytes;
184 	const char *k_platform = ELF_PLATFORM;
185 	const char *k_base_platform = ELF_BASE_PLATFORM;
186 	unsigned char k_rand_bytes[16];
187 	int items;
188 	elf_addr_t *elf_info;
189 	int ei_index;
190 	const struct cred *cred = current_cred();
191 	struct vm_area_struct *vma;
192 
193 	/*
194 	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
195 	 * evictions by the processes running on the same package. One
196 	 * thing we can do is to shuffle the initial stack for them.
197 	 */
198 
199 	p = arch_align_stack(p);
200 
201 	/*
202 	 * If this architecture has a platform capability string, copy it
203 	 * to userspace.  In some cases (Sparc), this info is impossible
204 	 * for userspace to get any other way, in others (i386) it is
205 	 * merely difficult.
206 	 */
207 	u_platform = NULL;
208 	if (k_platform) {
209 		size_t len = strlen(k_platform) + 1;
210 
211 		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
212 		if (copy_to_user(u_platform, k_platform, len))
213 			return -EFAULT;
214 	}
215 
216 	/*
217 	 * If this architecture has a "base" platform capability
218 	 * string, copy it to userspace.
219 	 */
220 	u_base_platform = NULL;
221 	if (k_base_platform) {
222 		size_t len = strlen(k_base_platform) + 1;
223 
224 		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
225 		if (copy_to_user(u_base_platform, k_base_platform, len))
226 			return -EFAULT;
227 	}
228 
229 	/*
230 	 * Generate 16 random bytes for userspace PRNG seeding.
231 	 */
232 	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
233 	u_rand_bytes = (elf_addr_t __user *)
234 		       STACK_ALLOC(p, sizeof(k_rand_bytes));
235 	if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
236 		return -EFAULT;
237 
238 	/* Create the ELF interpreter info */
239 	elf_info = (elf_addr_t *)mm->saved_auxv;
240 	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
241 #define NEW_AUX_ENT(id, val) \
242 	do { \
243 		*elf_info++ = id; \
244 		*elf_info++ = val; \
245 	} while (0)
246 
247 #ifdef ARCH_DLINFO
248 	/*
249 	 * ARCH_DLINFO must come first so PPC can do its special alignment of
250 	 * AUXV.
251 	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
252 	 * ARCH_DLINFO changes
253 	 */
254 	ARCH_DLINFO;
255 #endif
256 	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
257 	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
258 	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
259 	NEW_AUX_ENT(AT_PHDR, phdr_addr);
260 	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
261 	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
262 	NEW_AUX_ENT(AT_BASE, interp_load_addr);
263 	NEW_AUX_ENT(AT_FLAGS, 0);
264 	NEW_AUX_ENT(AT_ENTRY, e_entry);
265 	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
266 	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
267 	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
268 	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
269 	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
270 	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
271 #ifdef ELF_HWCAP2
272 	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
273 #endif
274 	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
275 	if (k_platform) {
276 		NEW_AUX_ENT(AT_PLATFORM,
277 			    (elf_addr_t)(unsigned long)u_platform);
278 	}
279 	if (k_base_platform) {
280 		NEW_AUX_ENT(AT_BASE_PLATFORM,
281 			    (elf_addr_t)(unsigned long)u_base_platform);
282 	}
283 	if (bprm->have_execfd) {
284 		NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
285 	}
286 #undef NEW_AUX_ENT
287 	/* AT_NULL is zero; clear the rest too */
288 	memset(elf_info, 0, (char *)mm->saved_auxv +
289 			sizeof(mm->saved_auxv) - (char *)elf_info);
290 
291 	/* And advance past the AT_NULL entry.  */
292 	elf_info += 2;
293 
294 	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
295 	sp = STACK_ADD(p, ei_index);
296 
297 	items = (argc + 1) + (envc + 1) + 1;
298 	bprm->p = STACK_ROUND(sp, items);
299 
300 	/* Point sp at the lowest address on the stack */
301 #ifdef CONFIG_STACK_GROWSUP
302 	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
303 	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
304 #else
305 	sp = (elf_addr_t __user *)bprm->p;
306 #endif
307 
308 
309 	/*
310 	 * Grow the stack manually; some architectures have a limit on how
311 	 * far ahead a user-space access may be in order to grow the stack.
312 	 */
313 	if (mmap_read_lock_killable(mm))
314 		return -EINTR;
315 	vma = find_extend_vma(mm, bprm->p);
316 	mmap_read_unlock(mm);
317 	if (!vma)
318 		return -EFAULT;
319 
320 	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
321 	if (put_user(argc, sp++))
322 		return -EFAULT;
323 
324 	/* Populate list of argv pointers back to argv strings. */
325 	p = mm->arg_end = mm->arg_start;
326 	while (argc-- > 0) {
327 		size_t len;
328 		if (put_user((elf_addr_t)p, sp++))
329 			return -EFAULT;
330 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
331 		if (!len || len > MAX_ARG_STRLEN)
332 			return -EINVAL;
333 		p += len;
334 	}
335 	if (put_user(0, sp++))
336 		return -EFAULT;
337 	mm->arg_end = p;
338 
339 	/* Populate list of envp pointers back to envp strings. */
340 	mm->env_end = mm->env_start = p;
341 	while (envc-- > 0) {
342 		size_t len;
343 		if (put_user((elf_addr_t)p, sp++))
344 			return -EFAULT;
345 		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
346 		if (!len || len > MAX_ARG_STRLEN)
347 			return -EINVAL;
348 		p += len;
349 	}
350 	if (put_user(0, sp++))
351 		return -EFAULT;
352 	mm->env_end = p;
353 
354 	/* Put the elf_info on the stack in the right place.  */
355 	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
356 		return -EFAULT;
357 	return 0;
358 }
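/*
 * Sketch of the initial stack this function leaves behind, from the final
 * stack pointer upward (lowest address first):
 *
 *   argc
 *   argv[0] .. argv[argc - 1], NULL
 *   envp[0] .. envp[envc - 1], NULL
 *   auxv pairs, terminated by an AT_NULL pair
 *   (higher still: alignment padding, the 16 AT_RANDOM bytes, the optional
 *    platform strings, and finally the argv/envp strings copied during
 *    bprm setup)
 */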
359 
360 static unsigned long elf_map(struct file *filep, unsigned long addr,
361 		const struct elf_phdr *eppnt, int prot, int type,
362 		unsigned long total_size)
363 {
364 	unsigned long map_addr;
365 	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
366 	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
367 	addr = ELF_PAGESTART(addr);
368 	size = ELF_PAGEALIGN(size);
369 
370 	/* mmap() will return -EINVAL if given a zero size, but a
371 	 * segment with zero filesize is perfectly valid */
372 	if (!size)
373 		return addr;
374 
375 	/*
376 	* total_size is the size of the ELF (interpreter) image.
377 	* The _first_ mmap needs to know the full size, otherwise
378 	* randomization might put this image into an overlapping
379 	* position with the ELF binary image. (since size < total_size)
380 	* So we first map the 'big' image - and unmap the remainder at
381 	* the end. (which unmap is needed for ELF images with holes.)
382 	*/
383 	if (total_size) {
384 		total_size = ELF_PAGEALIGN(total_size);
385 		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
386 		if (!BAD_ADDR(map_addr))
387 			vm_munmap(map_addr+size, total_size-size);
388 	} else
389 		map_addr = vm_mmap(filep, addr, size, prot, type, off);
390 
391 	if ((type & MAP_FIXED_NOREPLACE) &&
392 	    PTR_ERR((void *)map_addr) == -EEXIST)
393 		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
394 			task_pid_nr(current), current->comm, (void *)addr);
395 
396 	return(map_addr);
397 }
398 
399 static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
400 {
401 	int i, first_idx = -1, last_idx = -1;
402 
403 	for (i = 0; i < nr; i++) {
404 		if (cmds[i].p_type == PT_LOAD) {
405 			last_idx = i;
406 			if (first_idx == -1)
407 				first_idx = i;
408 		}
409 	}
410 	if (first_idx == -1)
411 		return 0;
412 
413 	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
414 				ELF_PAGESTART(cmds[first_idx].p_vaddr);
415 }
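/*
 * Example: with ELF_MIN_ALIGN == 0x1000 and two PT_LOAD headers, the first
 * at p_vaddr 0x400000 and the last at p_vaddr 0x601000 with p_memsz 0x2230,
 * this returns 0x601000 + 0x2230 - 0x400000 == 0x203230, i.e. the span from
 * the first segment's page start to the end of the last segment.
 */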
416 
417 static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
418 {
419 	ssize_t rv;
420 
421 	rv = kernel_read(file, buf, len, &pos);
422 	if (unlikely(rv != len)) {
423 		return (rv < 0) ? rv : -EIO;
424 	}
425 	return 0;
426 }
427 
428 static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
429 {
430 	unsigned long alignment = 0;
431 	int i;
432 
433 	for (i = 0; i < nr; i++) {
434 		if (cmds[i].p_type == PT_LOAD) {
435 			unsigned long p_align = cmds[i].p_align;
436 
437 			/* skip non-power of two alignments as invalid */
438 			if (!is_power_of_2(p_align))
439 				continue;
440 			alignment = max(alignment, p_align);
441 		}
442 	}
443 
444 	/* ensure we align to at least one page */
445 	return ELF_PAGEALIGN(alignment);
446 }
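/*
 * Example: PT_LOAD headers with p_align values of 0x1000 and 0x200000 make
 * this return 0x200000 (the largest power-of-two alignment seen); an
 * invalid, non-power-of-two p_align is skipped rather than honoured.
 */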
447 
448 /**
449  * load_elf_phdrs() - load ELF program headers
450  * @elf_ex:   ELF header of the binary whose program headers should be loaded
451  * @elf_file: the opened ELF binary file
452  *
453  * Loads ELF program headers from the binary file elf_file, which has the ELF
454  * header pointed to by elf_ex, into a newly allocated array. The caller is
455  * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
456  */
457 static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
458 				       struct file *elf_file)
459 {
460 	struct elf_phdr *elf_phdata = NULL;
461 	int retval, err = -1;
462 	unsigned int size;
463 
464 	/*
465 	 * If the size of this structure has changed, then punt, since
466 	 * we will be doing the wrong thing.
467 	 */
468 	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
469 		goto out;
470 
471 	/* Sanity check the number of program headers... */
472 	/* ...and their total size. */
473 	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
474 	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
475 		goto out;
476 
477 	elf_phdata = kmalloc(size, GFP_KERNEL);
478 	if (!elf_phdata)
479 		goto out;
480 
481 	/* Read in the program headers */
482 	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
483 	if (retval < 0) {
484 		err = retval;
485 		goto out;
486 	}
487 
488 	/* Success! */
489 	err = 0;
490 out:
491 	if (err) {
492 		kfree(elf_phdata);
493 		elf_phdata = NULL;
494 	}
495 	return elf_phdata;
496 }
497 
498 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
499 
500 /**
501  * struct arch_elf_state - arch-specific ELF loading state
502  *
503  * This structure is used to preserve architecture specific data during
504  * the loading of an ELF file, throughout the checking of architecture
505  * specific ELF headers & through to the point where the ELF load is
506  * known to be proceeding (ie. SET_PERSONALITY).
507  *
508  * This implementation is a dummy for architectures which require no
509  * specific state.
510  */
511 struct arch_elf_state {
512 };
513 
514 #define INIT_ARCH_ELF_STATE {}
515 
516 /**
517  * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
518  * @ehdr:	The main ELF header
519  * @phdr:	The program header to check
520  * @elf:	The open ELF file
521  * @is_interp:	True if the phdr is from the interpreter of the ELF being
522  *		loaded, else false.
523  * @state:	Architecture-specific state preserved throughout the process
524  *		of loading the ELF.
525  *
526  * Inspects the program header phdr to validate its correctness and/or
527  * suitability for the system. Called once per ELF program header in the
528  * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
529  * interpreter.
530  *
531  * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
532  *         with that return code.
533  */
534 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
535 				   struct elf_phdr *phdr,
536 				   struct file *elf, bool is_interp,
537 				   struct arch_elf_state *state)
538 {
539 	/* Dummy implementation, always proceed */
540 	return 0;
541 }
542 
543 /**
544  * arch_check_elf() - check an ELF executable
545  * @ehdr:	The main ELF header
546  * @has_interp:	True if the ELF has an interpreter, else false.
547  * @interp_ehdr: The interpreter's ELF header
548  * @state:	Architecture-specific state preserved throughout the process
549  *		of loading the ELF.
550  *
551  * Provides a final opportunity for architecture code to reject the loading
552  * of the ELF & cause an exec syscall to return an error. This is called after
553  * all program headers to be checked by arch_elf_pt_proc have been.
554  *
555  * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
556  *         with that return code.
557  */
558 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
559 				 struct elfhdr *interp_ehdr,
560 				 struct arch_elf_state *state)
561 {
562 	/* Dummy implementation, always proceed */
563 	return 0;
564 }
565 
566 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
567 
568 static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
569 			    bool has_interp, bool is_interp)
570 {
571 	int prot = 0;
572 
573 	if (p_flags & PF_R)
574 		prot |= PROT_READ;
575 	if (p_flags & PF_W)
576 		prot |= PROT_WRITE;
577 	if (p_flags & PF_X)
578 		prot |= PROT_EXEC;
579 
580 	return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
581 }
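/*
 * Example: a segment with p_flags == (PF_R | PF_X) maps to
 * PROT_READ | PROT_EXEC here; arch_elf_adjust_prot() may then add
 * architecture-specific bits (for instance, arm64 can add PROT_BTI based
 * on the GNU properties parsed earlier).
 */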
582 
583 /* This is much more generalized than the library routine read function,
584    so we keep this separate.  Technically the library read function
585    is only provided so that we can read a.out libraries that have
586    an ELF header */
587 
588 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
589 		struct file *interpreter,
590 		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
591 		struct arch_elf_state *arch_state)
592 {
593 	struct elf_phdr *eppnt;
594 	unsigned long load_addr = 0;
595 	int load_addr_set = 0;
596 	unsigned long last_bss = 0, elf_bss = 0;
597 	int bss_prot = 0;
598 	unsigned long error = ~0UL;
599 	unsigned long total_size;
600 	int i;
601 
602 	/* First of all, some simple consistency checks */
603 	if (interp_elf_ex->e_type != ET_EXEC &&
604 	    interp_elf_ex->e_type != ET_DYN)
605 		goto out;
606 	if (!elf_check_arch(interp_elf_ex) ||
607 	    elf_check_fdpic(interp_elf_ex))
608 		goto out;
609 	if (!interpreter->f_op->mmap)
610 		goto out;
611 
612 	total_size = total_mapping_size(interp_elf_phdata,
613 					interp_elf_ex->e_phnum);
614 	if (!total_size) {
615 		error = -EINVAL;
616 		goto out;
617 	}
618 
619 	eppnt = interp_elf_phdata;
620 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
621 		if (eppnt->p_type == PT_LOAD) {
622 			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
623 			int elf_prot = make_prot(eppnt->p_flags, arch_state,
624 						 true, true);
625 			unsigned long vaddr = 0;
626 			unsigned long k, map_addr;
627 
628 			vaddr = eppnt->p_vaddr;
629 			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
630 				elf_type |= MAP_FIXED;
631 			else if (no_base && interp_elf_ex->e_type == ET_DYN)
632 				load_addr = -vaddr;
633 
634 			map_addr = elf_map(interpreter, load_addr + vaddr,
635 					eppnt, elf_prot, elf_type, total_size);
636 			total_size = 0;
637 			error = map_addr;
638 			if (BAD_ADDR(map_addr))
639 				goto out;
640 
641 			if (!load_addr_set &&
642 			    interp_elf_ex->e_type == ET_DYN) {
643 				load_addr = map_addr - ELF_PAGESTART(vaddr);
644 				load_addr_set = 1;
645 			}
646 
647 			/*
648 			 * Check to see if the section's size will overflow the
649 			 * allowed task size. Note that p_filesz must always be
650 			 * <= p_memsz so it's only necessary to check p_memsz.
651 			 */
652 			k = load_addr + eppnt->p_vaddr;
653 			if (BAD_ADDR(k) ||
654 			    eppnt->p_filesz > eppnt->p_memsz ||
655 			    eppnt->p_memsz > TASK_SIZE ||
656 			    TASK_SIZE - eppnt->p_memsz < k) {
657 				error = -ENOMEM;
658 				goto out;
659 			}
660 
661 			/*
662 			 * Find the end of the file mapping for this phdr, and
663 			 * keep track of the largest address we see for this.
664 			 */
665 			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
666 			if (k > elf_bss)
667 				elf_bss = k;
668 
669 			/*
670 			 * Do the same thing for the memory mapping - between
671 			 * elf_bss and last_bss is the bss section.
672 			 */
673 			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
674 			if (k > last_bss) {
675 				last_bss = k;
676 				bss_prot = elf_prot;
677 			}
678 		}
679 	}
680 
681 	/*
682 	 * Now fill out the bss section: first pad the last page from
683 	 * the file up to the page boundary, and zero it from elf_bss
684 	 * up to the end of the page.
685 	 */
686 	if (padzero(elf_bss)) {
687 		error = -EFAULT;
688 		goto out;
689 	}
690 	/*
691 	 * Next, align both the file and mem bss up to the page size,
692 	 * since this is where elf_bss was just zeroed up to, and where
693 	 * last_bss will end after the vm_brk_flags() below.
694 	 */
695 	elf_bss = ELF_PAGEALIGN(elf_bss);
696 	last_bss = ELF_PAGEALIGN(last_bss);
697 	/* Finally, if there is still more bss to allocate, do it. */
698 	if (last_bss > elf_bss) {
699 		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
700 				bss_prot & PROT_EXEC ? VM_EXEC : 0);
701 		if (error)
702 			goto out;
703 	}
704 
705 	error = load_addr;
706 out:
707 	return error;
708 }
709 
710 /*
711  * These are the functions used to load ELF style executables and shared
712  * libraries.  There is no binary dependent code anywhere else.
713  */
714 
715 static int parse_elf_property(const char *data, size_t *off, size_t datasz,
716 			      struct arch_elf_state *arch,
717 			      bool have_prev_type, u32 *prev_type)
718 {
719 	size_t o, step;
720 	const struct gnu_property *pr;
721 	int ret;
722 
723 	if (*off == datasz)
724 		return -ENOENT;
725 
726 	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
727 		return -EIO;
728 	o = *off;
729 	datasz -= *off;
730 
731 	if (datasz < sizeof(*pr))
732 		return -ENOEXEC;
733 	pr = (const struct gnu_property *)(data + o);
734 	o += sizeof(*pr);
735 	datasz -= sizeof(*pr);
736 
737 	if (pr->pr_datasz > datasz)
738 		return -ENOEXEC;
739 
740 	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
741 	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
742 	if (step > datasz)
743 		return -ENOEXEC;
744 
745 	/* Properties are supposed to be unique and sorted on pr_type: */
746 	if (have_prev_type && pr->pr_type <= *prev_type)
747 		return -ENOEXEC;
748 	*prev_type = pr->pr_type;
749 
750 	ret = arch_parse_elf_property(pr->pr_type, data + o,
751 				      pr->pr_datasz, ELF_COMPAT, arch);
752 	if (ret)
753 		return ret;
754 
755 	*off = o + step;
756 	return 0;
757 }
758 
759 #define NOTE_DATA_SZ SZ_1K
760 #define GNU_PROPERTY_TYPE_0_NAME "GNU"
761 #define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
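/*
 * Layout expected below: the PT_GNU_PROPERTY segment points at a single
 * NT_GNU_PROPERTY_TYPE_0 note, i.e. an ELF note header (n_namesz == 4,
 * n_type == NT_GNU_PROPERTY_TYPE_0), the name "GNU\0", and a descriptor
 * holding a pr_type-sorted sequence of { u32 pr_type; u32 pr_datasz;
 * pr_datasz bytes of data, padded to ELF_GNU_PROPERTY_ALIGN } entries.
 */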
762 
763 static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
764 				struct arch_elf_state *arch)
765 {
766 	union {
767 		struct elf_note nhdr;
768 		char data[NOTE_DATA_SZ];
769 	} note;
770 	loff_t pos;
771 	ssize_t n;
772 	size_t off, datasz;
773 	int ret;
774 	bool have_prev_type;
775 	u32 prev_type;
776 
777 	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
778 		return 0;
779 
780 	/* load_elf_binary() shouldn't call us unless this is true... */
781 	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
782 		return -ENOEXEC;
783 
784 	/* If the properties are crazy large, that's too bad (for now): */
785 	if (phdr->p_filesz > sizeof(note))
786 		return -ENOEXEC;
787 
788 	pos = phdr->p_offset;
789 	n = kernel_read(f, &note, phdr->p_filesz, &pos);
790 
791 	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
792 	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
793 		return -EIO;
794 
795 	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
796 	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
797 	    strncmp(note.data + sizeof(note.nhdr),
798 		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
799 		return -ENOEXEC;
800 
801 	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
802 		       ELF_GNU_PROPERTY_ALIGN);
803 	if (off > n)
804 		return -ENOEXEC;
805 
806 	if (note.nhdr.n_descsz > n - off)
807 		return -ENOEXEC;
808 	datasz = off + note.nhdr.n_descsz;
809 
810 	have_prev_type = false;
811 	do {
812 		ret = parse_elf_property(note.data, &off, datasz, arch,
813 					 have_prev_type, &prev_type);
814 		have_prev_type = true;
815 	} while (!ret);
816 
817 	return ret == -ENOENT ? 0 : ret;
818 }
819 
820 static int load_elf_binary(struct linux_binprm *bprm)
821 {
822 	struct file *interpreter = NULL; /* to shut gcc up */
823 	unsigned long load_addr, load_bias = 0, phdr_addr = 0;
824 	int load_addr_set = 0;
825 	unsigned long error;
826 	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
827 	struct elf_phdr *elf_property_phdata = NULL;
828 	unsigned long elf_bss, elf_brk;
829 	int bss_prot = 0;
830 	int retval, i;
831 	unsigned long elf_entry;
832 	unsigned long e_entry;
833 	unsigned long interp_load_addr = 0;
834 	unsigned long start_code, end_code, start_data, end_data;
835 	unsigned long reloc_func_desc __maybe_unused = 0;
836 	int executable_stack = EXSTACK_DEFAULT;
837 	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
838 	struct elfhdr *interp_elf_ex = NULL;
839 	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
840 	struct mm_struct *mm;
841 	struct pt_regs *regs;
842 
843 	retval = -ENOEXEC;
844 	/* First of all, some simple consistency checks */
845 	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
846 		goto out;
847 
848 	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
849 		goto out;
850 	if (!elf_check_arch(elf_ex))
851 		goto out;
852 	if (elf_check_fdpic(elf_ex))
853 		goto out;
854 	if (!bprm->file->f_op->mmap)
855 		goto out;
856 
857 	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
858 	if (!elf_phdata)
859 		goto out;
860 
861 	elf_ppnt = elf_phdata;
862 	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
863 		char *elf_interpreter;
864 
865 		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
866 			elf_property_phdata = elf_ppnt;
867 			continue;
868 		}
869 
870 		if (elf_ppnt->p_type != PT_INTERP)
871 			continue;
872 
873 		/*
874 		 * This is the program interpreter used for shared libraries -
875 		 * for now assume that this is an a.out format binary.
876 		 */
877 		retval = -ENOEXEC;
878 		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
879 			goto out_free_ph;
880 
881 		retval = -ENOMEM;
882 		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
883 		if (!elf_interpreter)
884 			goto out_free_ph;
885 
886 		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
887 				  elf_ppnt->p_offset);
888 		if (retval < 0)
889 			goto out_free_interp;
890 		/* make sure path is NULL terminated */
891 		retval = -ENOEXEC;
892 		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
893 			goto out_free_interp;
894 
895 		interpreter = open_exec(elf_interpreter);
896 		kfree(elf_interpreter);
897 		retval = PTR_ERR(interpreter);
898 		if (IS_ERR(interpreter))
899 			goto out_free_ph;
900 
901 		/*
902 		 * If the binary is not readable then enforce mm->dumpable = 0
903 		 * regardless of the interpreter's permissions.
904 		 */
905 		would_dump(bprm, interpreter);
906 
907 		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
908 		if (!interp_elf_ex) {
909 			retval = -ENOMEM;
910 			goto out_free_file;
911 		}
912 
913 		/* Get the exec headers */
914 		retval = elf_read(interpreter, interp_elf_ex,
915 				  sizeof(*interp_elf_ex), 0);
916 		if (retval < 0)
917 			goto out_free_dentry;
918 
919 		break;
920 
921 out_free_interp:
922 		kfree(elf_interpreter);
923 		goto out_free_ph;
924 	}
925 
926 	elf_ppnt = elf_phdata;
927 	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
928 		switch (elf_ppnt->p_type) {
929 		case PT_GNU_STACK:
930 			if (elf_ppnt->p_flags & PF_X)
931 				executable_stack = EXSTACK_ENABLE_X;
932 			else
933 				executable_stack = EXSTACK_DISABLE_X;
934 			break;
935 
936 		case PT_LOPROC ... PT_HIPROC:
937 			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
938 						  bprm->file, false,
939 						  &arch_state);
940 			if (retval)
941 				goto out_free_dentry;
942 			break;
943 		}
944 
945 	/* Some simple consistency checks for the interpreter */
946 	if (interpreter) {
947 		retval = -ELIBBAD;
948 		/* Not an ELF interpreter */
949 		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
950 			goto out_free_dentry;
951 		/* Verify the interpreter has a valid arch */
952 		if (!elf_check_arch(interp_elf_ex) ||
953 		    elf_check_fdpic(interp_elf_ex))
954 			goto out_free_dentry;
955 
956 		/* Load the interpreter program headers */
957 		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
958 						   interpreter);
959 		if (!interp_elf_phdata)
960 			goto out_free_dentry;
961 
962 		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
963 		elf_property_phdata = NULL;
964 		elf_ppnt = interp_elf_phdata;
965 		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
966 			switch (elf_ppnt->p_type) {
967 			case PT_GNU_PROPERTY:
968 				elf_property_phdata = elf_ppnt;
969 				break;
970 
971 			case PT_LOPROC ... PT_HIPROC:
972 				retval = arch_elf_pt_proc(interp_elf_ex,
973 							  elf_ppnt, interpreter,
974 							  true, &arch_state);
975 				if (retval)
976 					goto out_free_dentry;
977 				break;
978 			}
979 	}
980 
981 	retval = parse_elf_properties(interpreter ?: bprm->file,
982 				      elf_property_phdata, &arch_state);
983 	if (retval)
984 		goto out_free_dentry;
985 
986 	/*
987 	 * Allow arch code to reject the ELF at this point, whilst it's
988 	 * still possible to return an error to the code that invoked
989 	 * the exec syscall.
990 	 */
991 	retval = arch_check_elf(elf_ex,
992 				!!interpreter, interp_elf_ex,
993 				&arch_state);
994 	if (retval)
995 		goto out_free_dentry;
996 
997 	/* Flush all traces of the currently running executable */
998 	retval = begin_new_exec(bprm);
999 	if (retval)
1000 		goto out_free_dentry;
1001 
1002 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
1003 	   may depend on the personality.  */
1004 	SET_PERSONALITY2(*elf_ex, &arch_state);
1005 	if (elf_read_implies_exec(*elf_ex, executable_stack))
1006 		current->personality |= READ_IMPLIES_EXEC;
1007 
1008 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1009 		current->flags |= PF_RANDOMIZE;
1010 
1011 	setup_new_exec(bprm);
1012 
1013 	/* Do this so that we can load the interpreter, if need be.  We will
1014 	   change some of these later */
1015 	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
1016 				 executable_stack);
1017 	if (retval < 0)
1018 		goto out_free_dentry;
1019 
1020 	elf_bss = 0;
1021 	elf_brk = 0;
1022 
1023 	start_code = ~0UL;
1024 	end_code = 0;
1025 	start_data = 0;
1026 	end_data = 0;
1027 
1028 	/* Now we do a little grungy work by mmapping the ELF image into
1029 	   the correct location in memory. */
1030 	for(i = 0, elf_ppnt = elf_phdata;
1031 	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
1032 		int elf_prot, elf_flags;
1033 		unsigned long k, vaddr;
1034 		unsigned long total_size = 0;
1035 		unsigned long alignment;
1036 
1037 		if (elf_ppnt->p_type != PT_LOAD)
1038 			continue;
1039 
1040 		if (unlikely (elf_brk > elf_bss)) {
1041 			unsigned long nbyte;
1042 
1043 			/* There was a PT_LOAD segment with p_memsz > p_filesz
1044 			   before this one. Map anonymous pages, if needed,
1045 			   and clear the area.  */
1046 			retval = set_brk(elf_bss + load_bias,
1047 					 elf_brk + load_bias,
1048 					 bss_prot);
1049 			if (retval)
1050 				goto out_free_dentry;
1051 			nbyte = ELF_PAGEOFFSET(elf_bss);
1052 			if (nbyte) {
1053 				nbyte = ELF_MIN_ALIGN - nbyte;
1054 				if (nbyte > elf_brk - elf_bss)
1055 					nbyte = elf_brk - elf_bss;
1056 				if (clear_user((void __user *)elf_bss +
1057 							load_bias, nbyte)) {
1058 					/*
1059 					 * This bss-zeroing can fail if the ELF
1060 					 * file specifies odd protections. So
1061 					 * we don't check the return value
1062 					 */
1063 				}
1064 			}
1065 		}
1066 
1067 		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
1068 				     !!interpreter, false);
1069 
1070 		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
1071 
1072 		vaddr = elf_ppnt->p_vaddr;
1073 		/*
1074 		 * If we are loading ET_EXEC or we have already performed
1075 		 * the ET_DYN load_addr calculations, proceed normally.
1076 		 */
1077 		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
1078 			elf_flags |= MAP_FIXED;
1079 		} else if (elf_ex->e_type == ET_DYN) {
1080 			/*
1081 			 * This logic is run once for the first LOAD Program
1082 			 * Header for ET_DYN binaries to calculate the
1083 			 * randomization (load_bias) for all the LOAD
1084 			 * Program Headers, and to calculate the entire
1085 			 * size of the ELF mapping (total_size). (Note that
1086 			 * load_addr_set is set to true later once the
1087 			 * initial mapping is performed.)
1088 			 *
1089 			 * There are effectively two types of ET_DYN
1090 			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
1091 			 * and loaders (ET_DYN without INTERP, since they
1092 			 * _are_ the ELF interpreter). The loaders must
1093 			 * be loaded away from programs since the program
1094 			 * may otherwise collide with the loader (especially
1095 			 * for ET_EXEC which does not have a randomized
1096 			 * position). For example to handle invocations of
1097 			 * "./ld.so someprog" to test out a new version of
1098 			 * the loader, the subsequent program that the
1099 			 * loader loads must avoid the loader itself, so
1100 			 * they cannot share the same load range. Sufficient
1101 			 * room for the brk must be allocated with the
1102 			 * loader as well, since brk must be available with
1103 			 * the loader.
1104 			 *
1105 			 * Therefore, programs are loaded offset from
1106 			 * ELF_ET_DYN_BASE and loaders are loaded into the
1107 			 * independently randomized mmap region (0 load_bias
1108 			 * without MAP_FIXED).
1109 			 */
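			/*
			 * Concretely: on x86-64 a PIE that has an interpreter
			 * typically ends up based near ELF_ET_DYN_BASE
			 * (roughly 0x555555554000, plus arch_mmap_rnd() when
			 * PF_RANDOMIZE is set), while invoking ld.so directly
			 * leaves load_bias at 0 and lets the normal mmap
			 * randomization choose where it lands.
			 */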
1110 			if (interpreter) {
1111 				load_bias = ELF_ET_DYN_BASE;
1112 				if (current->flags & PF_RANDOMIZE)
1113 					load_bias += arch_mmap_rnd();
1114 				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
1115 				if (alignment)
1116 					load_bias &= ~(alignment - 1);
1117 				elf_flags |= MAP_FIXED;
1118 			} else
1119 				load_bias = 0;
1120 
1121 			/*
1122 			 * Since load_bias is used for all subsequent loading
1123 			 * calculations, we must lower it by the first vaddr
1124 			 * so that the remaining calculations based on the
1125 			 * ELF vaddrs will be correctly offset. The result
1126 			 * is then page aligned.
1127 			 */
1128 			load_bias = ELF_PAGESTART(load_bias - vaddr);
1129 
1130 			total_size = total_mapping_size(elf_phdata,
1131 							elf_ex->e_phnum);
1132 			if (!total_size) {
1133 				retval = -EINVAL;
1134 				goto out_free_dentry;
1135 			}
1136 		}
1137 
1138 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
1139 				elf_prot, elf_flags, total_size);
1140 		if (BAD_ADDR(error)) {
1141 			retval = IS_ERR((void *)error) ?
1142 				PTR_ERR((void*)error) : -EINVAL;
1143 			goto out_free_dentry;
1144 		}
1145 
1146 		if (!load_addr_set) {
1147 			load_addr_set = 1;
1148 			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
1149 			if (elf_ex->e_type == ET_DYN) {
1150 				load_bias += error -
1151 				             ELF_PAGESTART(load_bias + vaddr);
1152 				load_addr += load_bias;
1153 				reloc_func_desc = load_bias;
1154 			}
1155 		}
1156 
1157 		/*
1158 		 * Figure out which segment in the file contains the Program
1159 		 * Header table, and map to the associated memory address.
1160 		 */
1161 		if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
1162 		    elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
1163 			phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
1164 				    elf_ppnt->p_vaddr;
1165 		}
1166 
1167 		k = elf_ppnt->p_vaddr;
1168 		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
1169 			start_code = k;
1170 		if (start_data < k)
1171 			start_data = k;
1172 
1173 		/*
1174 		 * Check to see if the section's size will overflow the
1175 		 * allowed task size. Note that p_filesz must always be
1176 		 * <= p_memsz so it is only necessary to check p_memsz.
1177 		 */
1178 		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1179 		    elf_ppnt->p_memsz > TASK_SIZE ||
1180 		    TASK_SIZE - elf_ppnt->p_memsz < k) {
1181 			/* set_brk can never work. Avoid overflows. */
1182 			retval = -EINVAL;
1183 			goto out_free_dentry;
1184 		}
1185 
1186 		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1187 
1188 		if (k > elf_bss)
1189 			elf_bss = k;
1190 		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1191 			end_code = k;
1192 		if (end_data < k)
1193 			end_data = k;
1194 		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1195 		if (k > elf_brk) {
1196 			bss_prot = elf_prot;
1197 			elf_brk = k;
1198 		}
1199 	}
1200 
1201 	e_entry = elf_ex->e_entry + load_bias;
1202 	phdr_addr += load_bias;
1203 	elf_bss += load_bias;
1204 	elf_brk += load_bias;
1205 	start_code += load_bias;
1206 	end_code += load_bias;
1207 	start_data += load_bias;
1208 	end_data += load_bias;
1209 
1210 	/* Calling set_brk effectively mmaps the pages that we need
1211 	 * for the bss and break sections.  We must do this before
1212 	 * mapping in the interpreter, to make sure it doesn't wind
1213 	 * up getting placed where the bss needs to go.
1214 	 */
1215 	retval = set_brk(elf_bss, elf_brk, bss_prot);
1216 	if (retval)
1217 		goto out_free_dentry;
1218 	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1219 		retval = -EFAULT; /* Nobody gets to see this, but.. */
1220 		goto out_free_dentry;
1221 	}
1222 
1223 	if (interpreter) {
1224 		elf_entry = load_elf_interp(interp_elf_ex,
1225 					    interpreter,
1226 					    load_bias, interp_elf_phdata,
1227 					    &arch_state);
1228 		if (!IS_ERR((void *)elf_entry)) {
1229 			/*
1230 			 * load_elf_interp() returns relocation
1231 			 * adjustment
1232 			 */
1233 			interp_load_addr = elf_entry;
1234 			elf_entry += interp_elf_ex->e_entry;
1235 		}
1236 		if (BAD_ADDR(elf_entry)) {
1237 			retval = IS_ERR((void *)elf_entry) ?
1238 					(int)elf_entry : -EINVAL;
1239 			goto out_free_dentry;
1240 		}
1241 		reloc_func_desc = interp_load_addr;
1242 
1243 		allow_write_access(interpreter);
1244 		fput(interpreter);
1245 
1246 		kfree(interp_elf_ex);
1247 		kfree(interp_elf_phdata);
1248 	} else {
1249 		elf_entry = e_entry;
1250 		if (BAD_ADDR(elf_entry)) {
1251 			retval = -EINVAL;
1252 			goto out_free_dentry;
1253 		}
1254 	}
1255 
1256 	kfree(elf_phdata);
1257 
1258 	set_binfmt(&elf_format);
1259 
1260 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1261 	retval = arch_setup_additional_pages(bprm, !!interpreter);
1262 	if (retval < 0)
1263 		goto out;
1264 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1265 
1266 	retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
1267 				   e_entry, phdr_addr);
1268 	if (retval < 0)
1269 		goto out;
1270 
1271 	mm = current->mm;
1272 	mm->end_code = end_code;
1273 	mm->start_code = start_code;
1274 	mm->start_data = start_data;
1275 	mm->end_data = end_data;
1276 	mm->start_stack = bprm->p;
1277 
1278 	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1279 		/*
1280 		 * For architectures with ELF randomization, when executing
1281 		 * a loader directly (i.e. no interpreter listed in ELF
1282 		 * headers), move the brk area out of the mmap region
1283 		 * (since it grows up, and may collide early with the stack
1284 		 * growing down), and into the unused ELF_ET_DYN_BASE region.
1285 		 */
1286 		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
1287 		    elf_ex->e_type == ET_DYN && !interpreter) {
1288 			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
1289 		}
1290 
1291 		mm->brk = mm->start_brk = arch_randomize_brk(mm);
1292 #ifdef compat_brk_randomized
1293 		current->brk_randomized = 1;
1294 #endif
1295 	}
1296 
1297 	if (current->personality & MMAP_PAGE_ZERO) {
1298 		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1299 		   and some applications "depend" upon this behavior.
1300 		   Since we do not have the power to recompile these, we
1301 		   emulate the SVr4 behavior. Sigh. */
1302 		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1303 				MAP_FIXED | MAP_PRIVATE, 0);
1304 	}
1305 
1306 	regs = current_pt_regs();
1307 #ifdef ELF_PLAT_INIT
1308 	/*
1309 	 * The ABI may specify that certain registers be set up in special
1310 	 * ways (on i386 %edx is the address of a DT_FINI function, for
1311 	 * example).  In addition, it may also specify (e.g., PowerPC64 ELF)
1312 	 * that the e_entry field is the address of the function descriptor
1313 	 * for the startup routine, rather than the address of the startup
1314 	 * routine itself.  This macro performs whatever initialization to
1315 	 * the regs structure is required as well as any relocations to the
1316 	 * function descriptor entries when executing dynamically linked apps.
1317 	 */
1318 	ELF_PLAT_INIT(regs, reloc_func_desc);
1319 #endif
1320 
1321 	finalize_exec(bprm);
1322 	start_thread(regs, elf_entry, bprm->p);
1323 	retval = 0;
1324 out:
1325 	return retval;
1326 
1327 	/* error cleanup */
1328 out_free_dentry:
1329 	kfree(interp_elf_ex);
1330 	kfree(interp_elf_phdata);
1331 out_free_file:
1332 	allow_write_access(interpreter);
1333 	if (interpreter)
1334 		fput(interpreter);
1335 out_free_ph:
1336 	kfree(elf_phdata);
1337 	goto out;
1338 }
1339 
1340 #ifdef CONFIG_USELIB
1341 /* This is really simpleminded and specialized - we are loading an
1342    a.out library that is given an ELF header. */
1343 static int load_elf_library(struct file *file)
1344 {
1345 	struct elf_phdr *elf_phdata;
1346 	struct elf_phdr *eppnt;
1347 	unsigned long elf_bss, bss, len;
1348 	int retval, error, i, j;
1349 	struct elfhdr elf_ex;
1350 
1351 	error = -ENOEXEC;
1352 	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
1353 	if (retval < 0)
1354 		goto out;
1355 
1356 	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1357 		goto out;
1358 
1359 	/* First of all, some simple consistency checks */
1360 	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1361 	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1362 		goto out;
1363 	if (elf_check_fdpic(&elf_ex))
1364 		goto out;
1365 
1366 	/* Now read in all of the header information */
1367 
1368 	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1369 	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1370 
1371 	error = -ENOMEM;
1372 	elf_phdata = kmalloc(j, GFP_KERNEL);
1373 	if (!elf_phdata)
1374 		goto out;
1375 
1376 	eppnt = elf_phdata;
1377 	error = -ENOEXEC;
1378 	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
1379 	if (retval < 0)
1380 		goto out_free_ph;
1381 
1382 	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1383 		if ((eppnt + i)->p_type == PT_LOAD)
1384 			j++;
1385 	if (j != 1)
1386 		goto out_free_ph;
1387 
1388 	while (eppnt->p_type != PT_LOAD)
1389 		eppnt++;
1390 
1391 	/* Now use mmap to map the library into memory. */
1392 	error = vm_mmap(file,
1393 			ELF_PAGESTART(eppnt->p_vaddr),
1394 			(eppnt->p_filesz +
1395 			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1396 			PROT_READ | PROT_WRITE | PROT_EXEC,
1397 			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
1398 			(eppnt->p_offset -
1399 			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1400 	if (error != ELF_PAGESTART(eppnt->p_vaddr))
1401 		goto out_free_ph;
1402 
1403 	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1404 	if (padzero(elf_bss)) {
1405 		error = -EFAULT;
1406 		goto out_free_ph;
1407 	}
1408 
1409 	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1410 	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
1411 	if (bss > len) {
1412 		error = vm_brk(len, bss - len);
1413 		if (error)
1414 			goto out_free_ph;
1415 	}
1416 	error = 0;
1417 
1418 out_free_ph:
1419 	kfree(elf_phdata);
1420 out:
1421 	return error;
1422 }
1423 #endif /* #ifdef CONFIG_USELIB */
1424 
1425 #ifdef CONFIG_ELF_CORE
1426 /*
1427  * ELF core dumper
1428  *
1429  * Modelled on fs/exec.c:aout_core_dump()
1430  * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1431  */
1432 
1433 /* An ELF note in memory */
1434 struct memelfnote
1435 {
1436 	const char *name;
1437 	int type;
1438 	unsigned int datasz;
1439 	void *data;
1440 };
1441 
1442 static int notesize(struct memelfnote *en)
1443 {
1444 	int sz;
1445 
1446 	sz = sizeof(struct elf_note);
1447 	sz += roundup(strlen(en->name) + 1, 4);
1448 	sz += roundup(en->datasz, 4);
1449 
1450 	return sz;
1451 }
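/*
 * Example: a note named "CORE" (5 bytes including the NUL, rounded up to 8)
 * with 336 bytes of data comes to 12 + 8 + 336 == 356 bytes, since the ELF
 * note header is three 32-bit words even in ELFCLASS64 core files.
 */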
1452 
1453 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1454 {
1455 	struct elf_note en;
1456 	en.n_namesz = strlen(men->name) + 1;
1457 	en.n_descsz = men->datasz;
1458 	en.n_type = men->type;
1459 
1460 	return dump_emit(cprm, &en, sizeof(en)) &&
1461 	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1462 	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1463 }
1464 
1465 static void fill_elf_header(struct elfhdr *elf, int segs,
1466 			    u16 machine, u32 flags)
1467 {
1468 	memset(elf, 0, sizeof(*elf));
1469 
1470 	memcpy(elf->e_ident, ELFMAG, SELFMAG);
1471 	elf->e_ident[EI_CLASS] = ELF_CLASS;
1472 	elf->e_ident[EI_DATA] = ELF_DATA;
1473 	elf->e_ident[EI_VERSION] = EV_CURRENT;
1474 	elf->e_ident[EI_OSABI] = ELF_OSABI;
1475 
1476 	elf->e_type = ET_CORE;
1477 	elf->e_machine = machine;
1478 	elf->e_version = EV_CURRENT;
1479 	elf->e_phoff = sizeof(struct elfhdr);
1480 	elf->e_flags = flags;
1481 	elf->e_ehsize = sizeof(struct elfhdr);
1482 	elf->e_phentsize = sizeof(struct elf_phdr);
1483 	elf->e_phnum = segs;
1484 }
1485 
1486 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1487 {
1488 	phdr->p_type = PT_NOTE;
1489 	phdr->p_offset = offset;
1490 	phdr->p_vaddr = 0;
1491 	phdr->p_paddr = 0;
1492 	phdr->p_filesz = sz;
1493 	phdr->p_memsz = 0;
1494 	phdr->p_flags = 0;
1495 	phdr->p_align = 0;
1496 }
1497 
1498 static void fill_note(struct memelfnote *note, const char *name, int type,
1499 		unsigned int sz, void *data)
1500 {
1501 	note->name = name;
1502 	note->type = type;
1503 	note->datasz = sz;
1504 	note->data = data;
1505 }
1506 
1507 /*
1508  * fill up all the fields in prstatus from the given task struct, except
1509  * registers which need to be filled up separately.
1510  */
1511 static void fill_prstatus(struct elf_prstatus *prstatus,
1512 		struct task_struct *p, long signr)
1513 {
1514 	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1515 	prstatus->pr_sigpend = p->pending.signal.sig[0];
1516 	prstatus->pr_sighold = p->blocked.sig[0];
1517 	rcu_read_lock();
1518 	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1519 	rcu_read_unlock();
1520 	prstatus->pr_pid = task_pid_vnr(p);
1521 	prstatus->pr_pgrp = task_pgrp_vnr(p);
1522 	prstatus->pr_sid = task_session_vnr(p);
1523 	if (thread_group_leader(p)) {
1524 		struct task_cputime cputime;
1525 
1526 		/*
1527 		 * This is the record for the group leader.  It shows the
1528 		 * group-wide total, not its individual thread total.
1529 		 */
1530 		thread_group_cputime(p, &cputime);
1531 		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
1532 		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
1533 	} else {
1534 		u64 utime, stime;
1535 
1536 		task_cputime(p, &utime, &stime);
1537 		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
1538 		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
1539 	}
1540 
1541 	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
1542 	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
1543 }
1544 
1545 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1546 		       struct mm_struct *mm)
1547 {
1548 	const struct cred *cred;
1549 	unsigned int i, len;
1550 
1551 	/* first copy the parameters from user space */
1552 	memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1553 
1554 	len = mm->arg_end - mm->arg_start;
1555 	if (len >= ELF_PRARGSZ)
1556 		len = ELF_PRARGSZ-1;
1557 	if (copy_from_user(&psinfo->pr_psargs,
1558 		           (const char __user *)mm->arg_start, len))
1559 		return -EFAULT;
1560 	for(i = 0; i < len; i++)
1561 		if (psinfo->pr_psargs[i] == 0)
1562 			psinfo->pr_psargs[i] = ' ';
1563 	psinfo->pr_psargs[len] = 0;
1564 
1565 	rcu_read_lock();
1566 	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1567 	rcu_read_unlock();
1568 	psinfo->pr_pid = task_pid_vnr(p);
1569 	psinfo->pr_pgrp = task_pgrp_vnr(p);
1570 	psinfo->pr_sid = task_session_vnr(p);
1571 
1572 	i = p->state ? ffz(~p->state) + 1 : 0;
1573 	psinfo->pr_state = i;
1574 	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1575 	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1576 	psinfo->pr_nice = task_nice(p);
1577 	psinfo->pr_flag = p->flags;
1578 	rcu_read_lock();
1579 	cred = __task_cred(p);
1580 	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1581 	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1582 	rcu_read_unlock();
1583 	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1584 
1585 	return 0;
1586 }
1587 
1588 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1589 {
1590 	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1591 	int i = 0;
1592 	do
1593 		i += 2;
1594 	while (auxv[i - 2] != AT_NULL);
1595 	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1596 }
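/*
 * Example: if saved_auxv holds three real entries followed by the
 * terminating AT_NULL pair, the do/while above stops with i == 8 and the
 * note covers all eight elf_addr_t values, AT_NULL included.
 */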
1597 
1598 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1599 		const kernel_siginfo_t *siginfo)
1600 {
1601 	copy_siginfo_to_external(csigdata, siginfo);
1602 	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1603 }
1604 
1605 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
1606 /*
1607  * Format of NT_FILE note:
1608  *
1609  * long count     -- how many files are mapped
1610  * long page_size -- units for file_ofs
1611  * array of [COUNT] elements of
1612  *   long start
1613  *   long end
1614  *   long file_ofs
1615  * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1616  */
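/*
 * Example: a core dump of a process with two file-backed mappings would
 * carry count == 2, page_size == PAGE_SIZE (4096 on most configurations),
 * two (start, end, file_ofs) triples (file_ofs in page_size units, taken
 * from vm_pgoff), and then the two path strings back to back, each
 * NUL-terminated.
 */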
1617 static int fill_files_note(struct memelfnote *note)
1618 {
1619 	struct mm_struct *mm = current->mm;
1620 	struct vm_area_struct *vma;
1621 	unsigned count, size, names_ofs, remaining, n;
1622 	user_long_t *data;
1623 	user_long_t *start_end_ofs;
1624 	char *name_base, *name_curpos;
1625 
1626 	/* *Estimated* file count and total data size needed */
1627 	count = mm->map_count;
1628 	if (count > UINT_MAX / 64)
1629 		return -EINVAL;
1630 	size = count * 64;
1631 
1632 	names_ofs = (2 + 3 * count) * sizeof(data[0]);
1633  alloc:
1634 	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1635 		return -EINVAL;
1636 	size = round_up(size, PAGE_SIZE);
1637 	/*
1638 	 * "size" can be 0 here legitimately.
1639 	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
1640 	 */
1641 	data = kvmalloc(size, GFP_KERNEL);
1642 	if (ZERO_OR_NULL_PTR(data))
1643 		return -ENOMEM;
1644 
1645 	start_end_ofs = data + 2;
1646 	name_base = name_curpos = ((char *)data) + names_ofs;
1647 	remaining = size - names_ofs;
1648 	count = 0;
1649 	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1650 		struct file *file;
1651 		const char *filename;
1652 
1653 		file = vma->vm_file;
1654 		if (!file)
1655 			continue;
1656 		filename = file_path(file, name_curpos, remaining);
1657 		if (IS_ERR(filename)) {
1658 			if (PTR_ERR(filename) == -ENAMETOOLONG) {
1659 				kvfree(data);
1660 				size = size * 5 / 4;
1661 				goto alloc;
1662 			}
1663 			continue;
1664 		}
1665 
1666 		/* file_path() fills at the end, move name down */
1667 		/* n = strlen(filename) + 1: */
1668 		n = (name_curpos + remaining) - filename;
1669 		remaining = filename - name_curpos;
1670 		memmove(name_curpos, filename, n);
1671 		name_curpos += n;
1672 
1673 		*start_end_ofs++ = vma->vm_start;
1674 		*start_end_ofs++ = vma->vm_end;
1675 		*start_end_ofs++ = vma->vm_pgoff;
1676 		count++;
1677 	}
1678 
1679 	/* Now we know exact count of files, can store it */
1680 	data[0] = count;
1681 	data[1] = PAGE_SIZE;
1682 	/*
1683 	 * Count usually is less than mm->map_count,
1684 	 * we need to move filenames down.
1685 	 */
1686 	n = mm->map_count - count;
1687 	if (n != 0) {
1688 		unsigned shift_bytes = n * 3 * sizeof(data[0]);
1689 		memmove(name_base - shift_bytes, name_base,
1690 			name_curpos - name_base);
1691 		name_curpos -= shift_bytes;
1692 	}
1693 
1694 	size = name_curpos - (char *)data;
1695 	fill_note(note, "CORE", NT_FILE, size, data);
1696 	return 0;
1697 }
1698 
1699 #ifdef CORE_DUMP_USE_REGSET
1700 #include <linux/regset.h>
1701 
1702 struct elf_thread_core_info {
1703 	struct elf_thread_core_info *next;
1704 	struct task_struct *task;
1705 	struct elf_prstatus prstatus;
1706 	struct memelfnote notes[];
1707 };
1708 
1709 struct elf_note_info {
1710 	struct elf_thread_core_info *thread;
1711 	struct memelfnote psinfo;
1712 	struct memelfnote signote;
1713 	struct memelfnote auxv;
1714 	struct memelfnote files;
1715 	user_siginfo_t csigdata;
1716 	size_t size;
1717 	int thread_notes;
1718 };
1719 
1720 /*
1721  * When a regset has a writeback hook, we call it on each thread before
1722  * dumping user memory.  On register window machines, this makes sure the
1723  * user memory backing the register data is up to date before we read it.
1724  */
1725 static void do_thread_regset_writeback(struct task_struct *task,
1726 				       const struct user_regset *regset)
1727 {
1728 	if (regset->writeback)
1729 		regset->writeback(task, regset, 1);
1730 }
1731 
1732 #ifndef PRSTATUS_SIZE
1733 #define PRSTATUS_SIZE(S, R) sizeof(S)
1734 #endif
1735 
1736 #ifndef SET_PR_FPVALID
1737 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
1738 #endif
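/*
 * PRSTATUS_SIZE and SET_PR_FPVALID default to the layout of the native
 * struct elf_prstatus; the #ifndef guards let architectures whose compat
 * prstatus differs in size provide their own versions (the regset0_size
 * argument exists for that purpose and is ignored by the defaults).
 */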
1739 
1740 static int fill_thread_core_info(struct elf_thread_core_info *t,
1741 				 const struct user_regset_view *view,
1742 				 long signr, size_t *total)
1743 {
1744 	unsigned int i;
1745 	int regset0_size;
1746 
1747 	/*
1748 	 * NT_PRSTATUS is the one special case, because the regset data
1749 	 * goes into the pr_reg field inside the note contents, rather
1750 	 * than being the whole note contents.  We fill the rest in here.
1751 	 * We assume that regset 0 is NT_PRSTATUS.
1752 	 */
1753 	fill_prstatus(&t->prstatus, t->task, signr);
1754 	regset0_size = regset_get(t->task, &view->regsets[0],
1755 		   sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
1756 	if (regset0_size < 0)
1757 		return 0;
1758 
1759 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1760 		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
1761 	*total += notesize(&t->notes[0]);
1762 
1763 	do_thread_regset_writeback(t->task, &view->regsets[0]);
1764 
1765 	/*
1766 	 * Each other regset might generate a note too.  For each regset
1767 	 * that has no core_note_type or is inactive, we leave t->notes[i]
1768 	 * all zero and we'll know to skip writing it later.
1769 	 */
1770 	for (i = 1; i < view->n; ++i) {
1771 		const struct user_regset *regset = &view->regsets[i];
1772 		int note_type = regset->core_note_type;
1773 		bool is_fpreg = note_type == NT_PRFPREG;
1774 		void *data;
1775 		int ret;
1776 
1777 		do_thread_regset_writeback(t->task, regset);
1778 		if (!note_type) // not for coredumps
1779 			continue;
1780 		if (regset->active && regset->active(t->task, regset) <= 0)
1781 			continue;
1782 
1783 		ret = regset_get_alloc(t->task, regset, ~0U, &data);
1784 		if (ret < 0)
1785 			continue;
1786 
1787 		if (is_fpreg)
1788 			SET_PR_FPVALID(&t->prstatus, 1, regset0_size);
1789 
1790 		fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX",
1791 			  note_type, ret, data);
1792 
1793 		*total += notesize(&t->notes[i]);
1794 	}
1795 
1796 	return 1;
1797 }
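/*
 * After fill_thread_core_info(), t->notes[0] is always the NT_PRSTATUS
 * note (name "CORE") with pr_reg taken from regset 0, and each further
 * slot holds one note per additional regset that has a core_note_type
 * and is active ("CORE" for NT_PRFPREG, "LINUX" for everything else).
 * Unused slots stay zeroed and are skipped when the notes are written
 * out.  On x86-64, for example, this typically yields NT_PRSTATUS,
 * NT_PRFPREG and NT_X86_XSTATE notes, depending on which regsets the
 * architecture exposes.
 */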
1798 
1799 static int fill_note_info(struct elfhdr *elf, int phdrs,
1800 			  struct elf_note_info *info,
1801 			  struct coredump_params *cprm)
1802 {
1803 	struct task_struct *dump_task = current;
1804 	const struct user_regset_view *view = task_user_regset_view(dump_task);
1805 	struct elf_thread_core_info *t;
1806 	struct elf_prpsinfo *psinfo;
1807 	struct core_thread *ct;
1808 	unsigned int i;
1809 
1810 	info->size = 0;
1811 	info->thread = NULL;
1812 
1813 	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1814 	if (psinfo == NULL) {
1815 		info->psinfo.data = NULL; /* So we don't free this wrongly */
1816 		return 0;
1817 	}
1818 
1819 	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1820 
1821 	/*
1822 	 * Figure out how many notes we're going to need for each thread.
1823 	 */
1824 	info->thread_notes = 0;
1825 	for (i = 0; i < view->n; ++i)
1826 		if (view->regsets[i].core_note_type != 0)
1827 			++info->thread_notes;
1828 
1829 	/*
1830 	 * Sanity check.  We rely on regset 0 being NT_PRSTATUS,
1831 	 * since it is our one special case.
1832 	 */
1833 	if (unlikely(info->thread_notes == 0) ||
1834 	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1835 		WARN_ON(1);
1836 		return 0;
1837 	}
1838 
1839 	/*
1840 	 * Initialize the ELF file header.
1841 	 */
1842 	fill_elf_header(elf, phdrs,
1843 			view->e_machine, view->e_flags);
1844 
1845 	/*
1846 	 * Allocate a structure for each thread.
1847 	 */
1848 	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1849 		t = kzalloc(offsetof(struct elf_thread_core_info,
1850 				     notes[info->thread_notes]),
1851 			    GFP_KERNEL);
1852 		if (unlikely(!t))
1853 			return 0;
1854 
1855 		t->task = ct->task;
1856 		if (ct->task == dump_task || !info->thread) {
1857 			t->next = info->thread;
1858 			info->thread = t;
1859 		} else {
1860 			/*
1861 			 * Make sure to keep the original task at
1862 			 * the head of the list.
1863 			 */
1864 			t->next = info->thread->next;
1865 			info->thread->next = t;
1866 		}
1867 	}
1868 
1869 	/*
1870 	 * Now fill in each thread's information.
1871 	 */
1872 	for (t = info->thread; t != NULL; t = t->next)
1873 		if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, &info->size))
1874 			return 0;
1875 
1876 	/*
1877 	 * Fill in the two process-wide notes.
1878 	 */
1879 	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1880 	info->size += notesize(&info->psinfo);
1881 
1882 	fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
1883 	info->size += notesize(&info->signote);
1884 
1885 	fill_auxv_note(&info->auxv, current->mm);
1886 	info->size += notesize(&info->auxv);
1887 
1888 	if (fill_files_note(&info->files) == 0)
1889 		info->size += notesize(&info->files);
1890 
1891 	return 1;
1892 }
1893 
1894 static size_t get_note_info_size(struct elf_note_info *info)
1895 {
1896 	return info->size;
1897 }
1898 
1899 /*
1900  * Write all the notes for each thread.  When writing the first thread, the
1901  * process-wide notes are interleaved after the first thread-specific note.
1902  */
1903 static int write_note_info(struct elf_note_info *info,
1904 			   struct coredump_params *cprm)
1905 {
1906 	bool first = true;
1907 	struct elf_thread_core_info *t = info->thread;
1908 
1909 	do {
1910 		int i;
1911 
1912 		if (!writenote(&t->notes[0], cprm))
1913 			return 0;
1914 
1915 		if (first && !writenote(&info->psinfo, cprm))
1916 			return 0;
1917 		if (first && !writenote(&info->signote, cprm))
1918 			return 0;
1919 		if (first && !writenote(&info->auxv, cprm))
1920 			return 0;
1921 		if (first && info->files.data &&
1922 				!writenote(&info->files, cprm))
1923 			return 0;
1924 
1925 		for (i = 1; i < info->thread_notes; ++i)
1926 			if (t->notes[i].data &&
1927 			    !writenote(&t->notes[i], cprm))
1928 				return 0;
1929 
1930 		first = false;
1931 		t = t->next;
1932 	} while (t);
1933 
1934 	return 1;
1935 }
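/*
 * The PT_NOTE payload produced above is therefore ordered roughly as:
 *
 *   dumping thread:  NT_PRSTATUS
 *   process-wide:    NT_PRPSINFO, NT_SIGINFO, NT_AUXV, NT_FILE (if any)
 *   dumping thread:  its remaining regset notes
 *   other threads:   NT_PRSTATUS followed by their regset notes
 *
 * fill_note_info() keeps the task that triggered the dump at the head of
 * the thread list, so its notes come first.
 */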
1936 
1937 static void free_note_info(struct elf_note_info *info)
1938 {
1939 	struct elf_thread_core_info *threads = info->thread;
1940 	while (threads) {
1941 		unsigned int i;
1942 		struct elf_thread_core_info *t = threads;
1943 		threads = t->next;
1944 		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1945 		for (i = 1; i < info->thread_notes; ++i)
1946 			kfree(t->notes[i].data);
1947 		kfree(t);
1948 	}
1949 	kfree(info->psinfo.data);
1950 	kvfree(info->files.data);
1951 }
1952 
1953 #else
1954 
1955 /* Here is the structure in which status of each thread is captured. */
1956 struct elf_thread_status
1957 {
1958 	struct list_head list;
1959 	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
1960 	elf_fpregset_t fpu;		/* NT_PRFPREG */
1961 	struct task_struct *thread;
1962 	struct memelfnote notes[3];
1963 	int num_notes;
1964 };
1965 
1966 /*
1967  * In order to add the specific thread information for the ELF file format,
1968  * we need to keep a linked list of every thread's pr_status and then create
1969  * a single section for them in the final core file.
1970  */
1971 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1972 {
1973 	int sz = 0;
1974 	struct task_struct *p = t->thread;
1975 	t->num_notes = 0;
1976 
1977 	fill_prstatus(&t->prstatus, p, signr);
1978 	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1979 
1980 	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1981 		  &(t->prstatus));
1982 	t->num_notes++;
1983 	sz += notesize(&t->notes[0]);
1984 
1985 	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1986 								&t->fpu))) {
1987 		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1988 			  &(t->fpu));
1989 		t->num_notes++;
1990 		sz += notesize(&t->notes[1]);
1991 	}
1992 	return sz;
1993 }
1994 
1995 struct elf_note_info {
1996 	struct memelfnote *notes;
1997 	struct memelfnote *notes_files;
1998 	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
1999 	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
2000 	struct list_head thread_list;
2001 	elf_fpregset_t *fpu;
2002 	user_siginfo_t csigdata;
2003 	int thread_status_size;
2004 	int numnote;
2005 };
2006 
2007 static int elf_note_info_init(struct elf_note_info *info)
2008 {
2009 	memset(info, 0, sizeof(*info));
2010 	INIT_LIST_HEAD(&info->thread_list);
2011 
2012 	/* Allocate space for ELF notes */
2013 	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
2014 	if (!info->notes)
2015 		return 0;
2016 	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
2017 	if (!info->psinfo)
2018 		return 0;
2019 	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
2020 	if (!info->prstatus)
2021 		return 0;
2022 	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
2023 	if (!info->fpu)
2024 		return 0;
2025 	return 1;
2026 }
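/*
 * A failed allocation above simply returns 0; the caller bails out and
 * elf_core_dump() still calls free_note_info(), which is safe here
 * because the structure was zeroed first and kfree(NULL) is a no-op.
 */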
2027 
2028 static int fill_note_info(struct elfhdr *elf, int phdrs,
2029 			  struct elf_note_info *info,
2030 			  struct coredump_params *cprm)
2031 {
2032 	struct core_thread *ct;
2033 	struct elf_thread_status *ets;
2034 
2035 	if (!elf_note_info_init(info))
2036 		return 0;
2037 
2038 	for (ct = current->mm->core_state->dumper.next;
2039 					ct; ct = ct->next) {
2040 		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2041 		if (!ets)
2042 			return 0;
2043 
2044 		ets->thread = ct->task;
2045 		list_add(&ets->list, &info->thread_list);
2046 	}
2047 
2048 	list_for_each_entry(ets, &info->thread_list, list) {
2049 		int sz;
2050 
2051 		sz = elf_dump_thread_status(cprm->siginfo->si_signo, ets);
2052 		info->thread_status_size += sz;
2053 	}
2054 	/* Now collect the dump for the current task. */
2055 	memset(info->prstatus, 0, sizeof(*info->prstatus));
2056 	fill_prstatus(info->prstatus, current, cprm->siginfo->si_signo);
2057 	elf_core_copy_regs(&info->prstatus->pr_reg, cprm->regs);
2058 
2059 	/* Set up header */
2060 	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2061 
2062 	/*
2063 	 * Set up the notes in similar form to SVR4 core dumps made
2064 	 * with info from their /proc.
2065 	 */
2066 
2067 	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2068 		  sizeof(*info->prstatus), info->prstatus);
2069 	fill_psinfo(info->psinfo, current->group_leader, current->mm);
2070 	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2071 		  sizeof(*info->psinfo), info->psinfo);
2072 
2073 	fill_siginfo_note(info->notes + 2, &info->csigdata, cprm->siginfo);
2074 	fill_auxv_note(info->notes + 3, current->mm);
2075 	info->numnote = 4;
2076 
2077 	if (fill_files_note(info->notes + info->numnote) == 0) {
2078 		info->notes_files = info->notes + info->numnote;
2079 		info->numnote++;
2080 	}
2081 
2082 	/* Try to dump the FPU. */
2083 	info->prstatus->pr_fpvalid =
2084 		elf_core_copy_task_fpregs(current, cprm->regs, info->fpu);
2085 	if (info->prstatus->pr_fpvalid)
2086 		fill_note(info->notes + info->numnote++,
2087 			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2088 	return 1;
2089 }
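/*
 * The notes array uses at most six of its eight slots: slots 0-3 are
 * always NT_PRSTATUS, NT_PRPSINFO, NT_SIGINFO and NT_AUXV, optionally
 * followed by NT_FILE (if fill_files_note() succeeded) and NT_PRFPREG
 * (if the FPU state could be copied).  numnote tracks how many slots
 * are actually valid.
 */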
2090 
2091 static size_t get_note_info_size(struct elf_note_info *info)
2092 {
2093 	int sz = 0;
2094 	int i;
2095 
2096 	for (i = 0; i < info->numnote; i++)
2097 		sz += notesize(info->notes + i);
2098 
2099 	sz += info->thread_status_size;
2100 
2101 	return sz;
2102 }
2103 
2104 static int write_note_info(struct elf_note_info *info,
2105 			   struct coredump_params *cprm)
2106 {
2107 	struct elf_thread_status *ets;
2108 	int i;
2109 
2110 	for (i = 0; i < info->numnote; i++)
2111 		if (!writenote(info->notes + i, cprm))
2112 			return 0;
2113 
2114 	/* write out the thread status notes section */
2115 	list_for_each_entry(ets, &info->thread_list, list) {
2116 		for (i = 0; i < ets->num_notes; i++)
2117 			if (!writenote(&ets->notes[i], cprm))
2118 				return 0;
2119 	}
2120 
2121 	return 1;
2122 }
2123 
2124 static void free_note_info(struct elf_note_info *info)
2125 {
2126 	while (!list_empty(&info->thread_list)) {
2127 		struct list_head *tmp = info->thread_list.next;
2128 		list_del(tmp);
2129 		kfree(list_entry(tmp, struct elf_thread_status, list));
2130 	}
2131 
2132 	/* Free data possibly allocated by fill_files_note(): */
2133 	if (info->notes_files)
2134 		kvfree(info->notes_files->data);
2135 
2136 	kfree(info->prstatus);
2137 	kfree(info->psinfo);
2138 	kfree(info->notes);
2139 	kfree(info->fpu);
2140 }
2141 
2142 #endif
2143 
2144 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2145 			     elf_addr_t e_shoff, int segs)
2146 {
2147 	elf->e_shoff = e_shoff;
2148 	elf->e_shentsize = sizeof(*shdr4extnum);
2149 	elf->e_shnum = 1;
2150 	elf->e_shstrndx = SHN_UNDEF;
2151 
2152 	memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2153 
2154 	shdr4extnum->sh_type = SHT_NULL;
2155 	shdr4extnum->sh_size = elf->e_shnum;
2156 	shdr4extnum->sh_link = elf->e_shstrndx;
2157 	shdr4extnum->sh_info = segs;
2158 }
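/*
 * This is the ELF extended numbering scheme: when the real program
 * header count does not fit in the 16-bit e_phnum field, e_phnum is set
 * to PN_XNUM (0xffff) and the true count is carried in the sh_info field
 * of the single placeholder section header built here.
 */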
2159 
2160 /*
2161  * Actual dumper
2162  *
2163  * This is a two-pass process; first we find the offsets of the bits,
2164  * and then they are actually written out.  If we run out of core limit
2165  * we just truncate.
2166  */
2167 static int elf_core_dump(struct coredump_params *cprm)
2168 {
2169 	int has_dumped = 0;
2170 	int vma_count, segs, i;
2171 	size_t vma_data_size;
2172 	struct elfhdr elf;
2173 	loff_t offset = 0, dataoff;
2174 	struct elf_note_info info = { };
2175 	struct elf_phdr *phdr4note = NULL;
2176 	struct elf_shdr *shdr4extnum = NULL;
2177 	Elf_Half e_phnum;
2178 	elf_addr_t e_shoff;
2179 	struct core_vma_metadata *vma_meta;
2180 
2181 	if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
2182 		return 0;
2183 
2184 	/*
2185 	 * The number of segs is recorded in the ELF header as a 16-bit value.
2186 	 * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
2187 	 */
2188 	segs = vma_count + elf_core_extra_phdrs();
2189 
2190 	/* for notes section */
2191 	segs++;
2192 
2193 	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2194 	 * this, the kernel supports extended numbering. Have a look at
2195 	 * include/linux/elf.h for further information. */
2196 	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2197 
2198 	/*
2199 	 * Collect all the non-memory information about the process for the
2200 	 * notes.  This also sets up the file header.
2201 	 */
2202 	if (!fill_note_info(&elf, e_phnum, &info, cprm))
2203 		goto end_coredump;
2204 
2205 	has_dumped = 1;
2206 
2207 	offset += sizeof(elf);				/* Elf header */
2208 	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
2209 
2210 	/* Write notes phdr entry */
2211 	{
2212 		size_t sz = get_note_info_size(&info);
2213 
2214 		sz += elf_coredump_extra_notes_size();
2215 
2216 		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2217 		if (!phdr4note)
2218 			goto end_coredump;
2219 
2220 		fill_elf_note_phdr(phdr4note, sz, offset);
2221 		offset += sz;
2222 	}
2223 
2224 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2225 
2226 	offset += vma_data_size;
2227 	offset += elf_core_extra_data_size();
2228 	e_shoff = offset;
2229 
2230 	if (e_phnum == PN_XNUM) {
2231 		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2232 		if (!shdr4extnum)
2233 			goto end_coredump;
2234 		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
2235 	}
2236 
2237 	offset = dataoff;
2238 
2239 	if (!dump_emit(cprm, &elf, sizeof(elf)))
2240 		goto end_coredump;
2241 
2242 	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2243 		goto end_coredump;
2244 
2245 	/* Write program headers for segments dump */
2246 	for (i = 0; i < vma_count; i++) {
2247 		struct core_vma_metadata *meta = vma_meta + i;
2248 		struct elf_phdr phdr;
2249 
2250 		phdr.p_type = PT_LOAD;
2251 		phdr.p_offset = offset;
2252 		phdr.p_vaddr = meta->start;
2253 		phdr.p_paddr = 0;
2254 		phdr.p_filesz = meta->dump_size;
2255 		phdr.p_memsz = meta->end - meta->start;
2256 		offset += phdr.p_filesz;
2257 		phdr.p_flags = 0;
2258 		if (meta->flags & VM_READ)
2259 			phdr.p_flags |= PF_R;
2260 		if (meta->flags & VM_WRITE)
2261 			phdr.p_flags |= PF_W;
2262 		if (meta->flags & VM_EXEC)
2263 			phdr.p_flags |= PF_X;
2264 		phdr.p_align = ELF_EXEC_PAGESIZE;
2265 
2266 		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2267 			goto end_coredump;
2268 	}
2269 
2270 	if (!elf_core_write_extra_phdrs(cprm, offset))
2271 		goto end_coredump;
2272 
2273 	/* Write out the notes section */
2274 	if (!write_note_info(&info, cprm))
2275 		goto end_coredump;
2276 
2277 	if (elf_coredump_extra_notes_write(cprm))
2278 		goto end_coredump;
2279 
2280 	/* Align to page */
2281 	if (!dump_skip(cprm, dataoff - cprm->pos))
2282 		goto end_coredump;
2283 
2284 	for (i = 0; i < vma_count; i++) {
2285 		struct core_vma_metadata *meta = vma_meta + i;
2286 
2287 		if (!dump_user_range(cprm, meta->start, meta->dump_size))
2288 			goto end_coredump;
2289 	}
2290 	dump_truncate(cprm);
2291 
2292 	if (!elf_core_write_extra_data(cprm))
2293 		goto end_coredump;
2294 
2295 	if (e_phnum == PN_XNUM) {
2296 		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2297 			goto end_coredump;
2298 	}
2299 
2300 end_coredump:
2301 	free_note_info(&info);
2302 	kfree(shdr4extnum);
2303 	kvfree(vma_meta);
2304 	kfree(phdr4note);
2305 	return has_dumped;
2306 }
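/*
 * Rough layout of the core file written above:
 *
 *   ELF header
 *   program headers: the PT_NOTE entry, one PT_LOAD per dumped VMA,
 *                    then any arch-specific extra phdrs
 *   note data (plus arch-specific extra notes)
 *   padding up to an ELF_EXEC_PAGESIZE boundary
 *   VMA contents, in the same order as their program headers
 *   arch-specific extra data
 *   a single section header, present only when e_phnum == PN_XNUM
 */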
2307 
2308 #endif		/* CONFIG_ELF_CORE */
2309 
2310 static int __init init_elf_binfmt(void)
2311 {
2312 	register_binfmt(&elf_format);
2313 	return 0;
2314 }
2315 
2316 static void __exit exit_elf_binfmt(void)
2317 {
2318 	/* Remove the ELF loader. */
2319 	unregister_binfmt(&elf_format);
2320 }
2321 
2322 core_initcall(init_elf_binfmt);
2323 module_exit(exit_elf_binfmt);
2324 MODULE_LICENSE("GPL");
2325