/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/random.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>

#include "vextern.h"		/* Just for VMAGIC.  */
#undef VEXTERN

unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

static struct page **vdso_pages;
static unsigned vdso_size;

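/*
 * Sanity-check one variable slot in the vDSO image: it must still hold
 * the VMAGIC placeholder it was linked with, otherwise the image and
 * the kernel's idea of its layout have drifted apart.
 */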
static inline void *var_ref(void *p, char *name)
{
	if (*(void **)p != (void *)VMAGIC) {
		printk(KERN_ERR "VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}

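/*
 * Copy the linked-in vDSO image into its own pages, map them into the
 * kernel so the image can be inspected, and patch every magic-tagged
 * variable slot to point at the corresponding kernel variable.
 */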
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_size = npages << PAGE_SHIFT;
	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p;
		p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk(KERN_ERR "VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

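	/*
	 * For each VEXTERN(x) entry in vextern.h, the #define below
	 * patches one pointer slot.  Illustrative expansion for a
	 * hypothetical entry VEXTERN(jiffies):
	 *
	 *   *(typeof(__jiffies) **)
	 *	var_ref(VDSO64_SYMBOL(vbase, jiffies), "jiffies")
	 *		= &__jiffies;
	 */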
#define VEXTERN(x) \
	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

 oom:
	printk(KERN_ERR "Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
__initcall(init_vdso_vars);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it still shares a page table with
 * the stack top.  This doesn't give many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE64)
		end = TASK_SIZE64;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}
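
/*
 * Illustrative numbers (assuming x86-64 defaults with 4 KiB pages):
 * PAGE_SHIFT == 12 and PTRS_PER_PTE == 512, so "offset" carries 9
 * random bits and the vDSO lands at most 511 pages (just under one
 * 2 MiB PMD) above the stack top.
 */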

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, vdso_size);
	addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, vdso_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret)
		goto up_fail;

	current->mm->context.vdso = (void *)addr;
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

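/*
 * Parse the "vdso=" boot option: "vdso=0" disables the vDSO mapping,
 * anything non-zero (the default is 1) enables it.
 */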
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);