/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

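/*
 * Populate the image's page array from its backing data and patch in
 * the alternative instructions for this CPU. The image must be a
 * whole number of pages (enforced by the BUG_ON below).
 */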
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of the address space.
 * To save memory, keep it in the same page table (i.e. the same 2 MB
 * PMD-aligned region) as the stack top. This doesn't give that many
 * random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}
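
/*
 * Worked example of the arithmetic above (addresses are made up):
 * with start_stack = 0x7ffff7dff123 and len = 0x3000 (three pages),
 * PAGE_ALIGN() gives start = 0x7ffff7e00000, rounding up to the next
 * PMD boundary gives end = 0x7ffff8000000, and end -= len leaves
 * 0x7ffff7ffd000. That is (0x1fd000 >> PAGE_SHIFT) + 1 = 510 candidate
 * page slots, i.e. roughly 9 bits of randomness, which is what the
 * comment above means by "not that many random bits".
 */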
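
/*
 * Map the vdso text plus its vvar area into the current process.
 * sym_vvar_start is a negative offset from the start of the vdso text,
 * so one contiguous range is reserved and then split up as:
 *
 *	addr					[vvar] (-sym_vvar_start bytes)
 *	addr - sym_vvar_start = text_start	vdso text (image->size bytes)
 */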
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* sym_vvar_start is negative: the text follows the vvar area. */
	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * VM_MAYWRITE lets gdb COW the vdso text pages and plant
	 * breakpoints without the mapping itself being writable.
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      text_start + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
					 text_start + image->sym_hpet_page,
					 hpet_address >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
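
/*
 * After a successful map_vdso(), the two special mappings show up in
 * /proc/<pid>/maps under the names set above and in the image's
 * text_mapping, e.g. (addresses illustrative only):
 *
 *	7ffff7ffa000-7ffff7ffc000 r--p ... [vvar]
 *	7ffff7ffc000-7ffff7ffe000 r-xp ... [vdso]
 */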

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	/*
	 * The SYSENTER entry path returns to this address in the vdso
	 * (via SYSEXIT), so record where the image actually landed.
	 */
	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif
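
/*
 * The "vdso=" boot parameter: vdso=0 disables the 64-bit (and x32)
 * vDSO mappings above; vdso=1, the default, enables them.
 */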
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 1;	/* __setup handlers return 1 once the option is handled */
}
__setup("vdso=", vdso_setup);
#endif
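
/*
 * For reference, userspace finds the vdso mapped above via the
 * AT_SYSINFO_EHDR auxv entry rather than at any fixed address. A
 * minimal userspace (not kernel) sketch using glibc's getauxval():
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>	// getauxval(), AT_SYSINFO_EHDR
 *
 *	int main(void)
 *	{
 *		// Base of the [vdso] mapping, i.e. its ELF header.
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		if (!vdso)	// e.g. booted with vdso=0
 *			printf("no vDSO mapped\n");
 *		else
 *			printf("vDSO at %#lx\n", vdso);
 *		return 0;
 *	}
 */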