// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}
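
/*
 * The single conditional above works because, in asm/elf.h,
 * ALIGN_VA_32 is bit 0 and ALIGN_VA_64 is bit 1: the expression
 * (2 - mmap_is_ia32()) evaluates to ALIGN_VA_64 for a 64-bit task and
 * to ALIGN_VA_32 for a 32-bit one, selecting the flag that matches
 * the bitness of the current syscall.
 */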

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
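
/*
 * Worked example (values assumed for illustration only): with
 * va_align.mask == 0x7000, i.e. bit slice [12:15), and a per-boot
 * va_align.bits of 0x5000:
 *
 *	align_vdso_addr(0x7f0000001000)
 *	  = ((0x7f0000001000 + 0x7000) & ~0x7000) | 0x5000
 *	  = 0x7f0000008000 | 0x5000
 *	  = 0x7f000000d000
 *
 * Every vDSO mapped during this boot then carries the same random
 * value in the aliasing-relevant slice.
 */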

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
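
/*
 * Kernel command line usage:
 *
 *	align_va_addr=on	align both 32- and 64-bit mappings
 *	align_va_addr=off	disable the extra alignment
 *	align_va_addr=32	align 32-bit mappings only
 *	align_va_addr=64	align 64-bit mappings only
 *
 * Returning 1 tells the early-param code the option was consumed.
 */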

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
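
/*
 * Unlike mmap2() on 32-bit targets, the native x86-64 mmap syscall
 * takes a byte offset; it must be page-aligned (else -EINVAL above)
 * and is converted to a page offset for the generic ksys_mmap_pgoff()
 * helper.
 */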

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin	= get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
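
/*
 * The MAP_32BIT branch above yields a 1GB window at
 * [0x40000000, 0x80000000); with ASLR the start is shifted up by a
 * random page-aligned amount below 32MB (0x02000000), which keeps the
 * whole window inside the low 31 bits.
 */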

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
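
/*
 * Note that align_offset is seeded with the mapping's byte offset
 * (pgoff << PAGE_SHIFT) before the per-boot bits are added, so for
 * file-backed mappings vm_unmapped_area() picks an address congruent
 * with the file offset within the aligned slice. This keeps mappings
 * of the same file placed consistently with respect to the I$
 * aliasing described above.
 */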

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
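
/*
 * The hint check above relies on mmap_address_hint_valid() from
 * arch/x86/mm/mmap.c: it fails if addr + len would overflow TASK_SIZE
 * or if the range would straddle DEFAULT_MAP_WINDOW, in which case
 * the hint is dropped and a regular top-down search is done instead.
 */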
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes this a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}