// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* Indices into the vdso_lookup[] table below, one per supported vDSO ABI. */
enum arch_vdso_type {
	ARM64_VDSO = 0,
#ifdef CONFIG_COMPAT_VDSO
	ARM64_VDSO32 = 1,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_TYPES	(ARM64_VDSO32 + 1)
#else
#define VDSO_TYPES	(ARM64_VDSO + 1)
#endif /* CONFIG_COMPAT_VDSO */

struct __vdso_abi {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
	{
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page, exposed read-only to userspace via the data
 * mapping and updated by the kernel's timekeeping code.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

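/*
 * Shared ->mremap() handler for the code mapping: the vDSO cannot be
 * resized, and a plain move only requires updating mm->context.vdso so
 * that the kernel can still locate it (e.g. for signal trampolines).
 */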
static int __vdso_remap(enum arch_vdso_type arch_index,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
				  vdso_lookup[arch_index].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

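/*
 * One-time setup for a vDSO image: sanity-check the ELF magic, compute
 * the number of code pages, and build the page array shared between
 * the data and code special mappings.
 */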
static int __vdso_init(enum arch_vdso_type arch_index)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_lookup[arch_index].vdso_pages = (
		vdso_lookup[arch_index].vdso_code_end -
		vdso_lookup[arch_index].vdso_code_start) >>
		PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

	for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
	vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

	return 0;
}

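/*
 * Map a vDSO into the new process: one read-only data page, followed
 * directly by the vDSO code, recording the code base address in
 * mm->context.vdso.
 */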
static int __setup_additional_pages(enum arch_vdso_type arch_index,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_lookup[arch_index].dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_lookup[arch_index].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS	0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR		1
#define C_VDSO		2
#define C_PAGES		(C_VDSO + 1)
#else
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vdso_pages[C_VECTORS],
	},
#ifdef CONFIG_COMPAT_VDSO
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	{
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_vdso_pages[C_SIGPAGE],
	},
#endif /* CONFIG_COMPAT_VDSO */
};

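/*
 * Allocate the [vectors] page and copy the kuser helpers in so that
 * they end at offset 0x1000, giving them their well-known AArch32
 * addresses once the page is mapped at AARCH32_VECTORS_BASE.
 */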
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
	return 0;
}

#ifdef CONFIG_COMPAT_VDSO
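/*
 * With a compat vDSO, wire up the AArch32 data/code special mappings
 * and initialize the vDSO image before allocating the kuser helpers
 * page.
 */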
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
	vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];

	ret = __vdso_init(ARM64_VDSO32);
	if (ret)
		return ret;

	/*
	 * Nothing needs unwinding if this fails: __vdso_init() takes its
	 * pages from static kernel data and the kernel image rather than
	 * allocating them, so there is nothing to free here. (The old
	 * error path called free_page() on addresses derived from the
	 * never-populated aarch32_vdso_pages[C_VVAR]/[C_VDSO] slots.)
	 */
	return aarch32_alloc_kuser_vdso_page();
}
#else
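/*
 * Without a compat vDSO, AArch32 signal return uses a separate
 * [sigpage] carrying the sigreturn trampolines; allocate it here along
 * with the kuser helpers page.
 */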
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

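/*
 * Install the [vectors] page at its ABI-mandated fixed address so that
 * AArch32 tasks can use the kuser helpers.
 */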
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
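/*
 * Map the [sigpage] and record its base in mm->context.vdso so that
 * signal delivery can locate the sigreturn trampolines.
 */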
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

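/*
 * Called from the AArch32 ELF loader to populate a new address space:
 * kuser helpers first, then either the compat vDSO or the [sigpage].
 */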
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(ARM64_VDSO32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR		0
#define A_VDSO		1
#define A_PAGES		(A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
	{
		.name = "[vvar]",
	},
	{
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

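/*
 * Boot-time initialization of the native (AArch64) vDSO: hook up the
 * data/code special mappings and validate the embedded image.
 */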
static int __init vdso_init(void)
{
	vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
	vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

	return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);

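/*
 * Called from the native ELF loader to map the AArch64 vDSO into a new
 * address space.
 */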
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = __setup_additional_pages(ARM64_VDSO,
				       mm,
				       bprm,
				       uses_interp);

	up_write(&mm->mmap_sem);

	return ret;
}