/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

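/*
 * Patch the vdso image's text with the alternative instructions that
 * match the running CPU, before the image is ever mapped into a process.
 */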
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

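/*
 * Page fault handler for the [vdso] mapping: back each page of the
 * mapping with the corresponding page of the kernel-side image blob.
 */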
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

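/*
 * If a 32-bit task is stopped on the vdso's int80 landing pad while the
 * vdso is being moved, rewrite the saved IP so the task resumes at the
 * landing pad's new address.
 */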
static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fix up the userspace landing address; see do_fast_syscall_32() */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

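/*
 * mremap() handler for the [vdso] mapping: refuse to resize, fix up a
 * possibly in-flight syscall landing, and record the new base address.
 */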
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

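/*
 * Page fault handler for the [vvar] mapping: insert the shared vvar
 * page, and the pvclock/hvclock pages when their clocksource has been
 * used, by PFN (the mapping is VM_PFNMAP, see map_vdso() below).
 */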
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(vma, vmf->address,
					    __pa(pvti) >> PAGE_SHIFT);
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero means map at any free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

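/*
 * Map the vdso exactly once per mm, at a caller-chosen address; used by
 * the arch_prctl(ARCH_MAP_VDSO_*) path, hence the check below that no
 * vdso/vvar mapping exists yet.
 */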
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping(), which may not
	 * do accounting and rlimit right.
	 * We could search the VMA near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

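/*
 * Called by the ELF loader for each new process image; maps the default
 * vdso for the binary's ABI unless the vdso has been disabled.
 */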
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
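/* "vdso=0" on the kernel command line disables the 64-bit vdso. */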
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
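/*
 * Encode the CPU and node numbers for each CPU in a per-CPU GDT entry's
 * segment limit (and, with RDTSCP, in the TSC_AUX MSR), so the vdso's
 * getcpu code can recover them from user space without a syscall.
 */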
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */