// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/*
	 * Handle the 32- and 64-bit case with a single conditional:
	 * mmap_is_ia32() is 1 for a 32-bit task, so (2 - mmap_is_ia32())
	 * evaluates to ALIGN_VA_32 (bit 0) for 32-bit tasks and to
	 * ALIGN_VA_64 (bit 1) for 64-bit tasks.
	 */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
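/*
 * Worked example (hypothetical values, not taken from real hardware):
 * assuming va_align.mask == 0x7000 (bits [12:15)) and per-boot random
 * bits va_align.bits == 0x3000, a page-aligned addr of 0x7fff12345000
 * is first rounded up to the 32K boundary 0x7fff12348000 and then gets
 * the random bits ORed in, yielding 0x7fff1234b000.
 */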

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
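/*
 * Example usage (values from the handler above): booting with
 * "align_va_addr=64" on the kernel command line enables the alignment
 * for 64-bit tasks only, "align_va_addr=on" enables it for both 32-
 * and 64-bit tasks, and "align_va_addr=off" disables it entirely.
 */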

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
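/*
 * Example: with 4K pages, mmap(..., off = 0x2000) passes pgoff 2 to
 * ksys_mmap_pgoff(), while an unaligned off such as 0x2001 fails the
 * (off & ~PAGE_MASK) check above and returns -EINVAL.
 */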

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin	= get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
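/*
 * For the MAP_32BIT case above, the search window is [0x40000000,
 * 0x80000000), i.e. the 1GB "playground" below the 2GB boundary; with
 * PF_RANDOMIZE set, the window start is additionally shifted by up to
 * 32MB (0x02000000) of page-aligned randomness via randomize_page().
 */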

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
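/*
 * Note on the file-mapping case above: align_offset starts at
 * (pgoff << PAGE_SHIFT) so that the chosen address stays consistent
 * with the mapping's offset into the file, and the per-boot random
 * bits from get_align_bits() are then added on top, as described in
 * the comment before get_align_bits().
 */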

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes this a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

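	/*
	 * On 5-level-paging kernels the adjustment above widens the
	 * search from the 47-bit DEFAULT_MAP_WINDOW up to the 56-bit
	 * TASK_SIZE_MAX, but only when userspace opted in by passing a
	 * hint address above the default window.
	 */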
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}