// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)
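/*
 * Note: 1 << 16 is 64K, presumably chosen so the vDSO stays naturally
 * aligned under a 64K base page size; a 64K-aligned base is trivially
 * 4K-aligned as well.
 */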

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated.
 */
static union {
	struct vdso_arch_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;

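/*
 * Page offsets within the vvar mapping: page 0 is the vdso data page
 * above; page 1 carries the time namespace data when CONFIG_TIME_NS is
 * enabled (see vvar_fault() below).
 */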
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
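/*
 * The vDSO text can be moved as a whole but not resized: the size check
 * above makes mremap() to a different size fail with -EINVAL, while a
 * successful move simply updates the cached base address.
 */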

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_special_mapping vvar_spec __ro_after_init = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
};
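/*
 * The .pages arrays of both vdso specs are filled in at boot by
 * vdso_init(), after the vDSO image has been fixed up for the running
 * CPU's features.
 */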

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return ((struct vdso_arch_data *)vvar_page)->data;
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_spec))
			zap_page_range(vma, vma->vm_start, size);
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif
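/*
 * Without CONFIG_TIME_NS there is never a per-namespace vvar page, so
 * the stub above makes vvar_fault() always map the global vdso_data.
 */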

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = virt_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace-specific
		 * VVAR page is mapped at VVAR_DATA_PAGE_OFFSET and the real
		 * VVAR page is mapped at VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
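/*
 * vmf_insert_pfn() is used rather than a struct page based fault: the
 * vvar VMA is VM_PFNMAP | VM_IO (see __arch_setup_additional_pages()),
 * so the mapping is inserted by PFN with no page refcounting involved.
 */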

/*
 * This is called from binfmt_elf: we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
		/*
		 * On 64-bit we don't have a preferred map address. This
		 * allows get_unmapped_area to find an area near other mmaps
		 * and most likely share an SLB entry.
		 */
		vdso_base = 0;
	}

	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
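	/*
	 * Example with 4K pages: VDSO_ALIGNMENT - 1 is 0xffff, and
	 * 0xffff & PAGE_MASK is 0xf000, so fifteen extra 4K pages of slack
	 * are requested. That is enough to round any page-aligned base up
	 * to the next 64K boundary and still fit the whole mapping.
	 */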

	/*
	 * Pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base, which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 * Add enough to the size so that the result can be aligned.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO.
	 */
	mm->context.vdso = (void __user *)vdso_base + vvar_size;
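	/*
	 * context.vdso deliberately points at the vDSO text rather than at
	 * the start of the whole mapping: the vvar pages sit below it, so
	 * the text begins vvar_size bytes above vdso_base.
	 */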

	vma = _install_special_mapping(mm, vdso_base, vvar_size,
				       VM_READ | VM_MAYREAD | VM_IO |
				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process isn't
	 * allowed to write to those pages.
	 * gdb can break that with the ptrace interface, and thus trigger COW
	 * on those pages, but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma))
		do_munmap(mm, vdso_base, vvar_size, NULL);

	return PTR_ERR_OR_ZERO(vma);
}
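/*
 * Resulting layout, illustrative and assuming 4K pages:
 *
 *	vdso_base + 0x0000	[vvar] vdso data page
 *	vdso_base + 0x1000	[vvar] time namespace page
 *	vdso_base + 0x2000	[vdso] text, mm->context.vdso points here
 */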

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);
	if (rc)
		mm->context.vdso = NULL;

	mmap_write_unlock(mm);
	return rc;
}

#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)
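/*
 * VDSO_DO_FIXUPS() resolves the start/end labels of a fixup section in
 * the vDSO image via the VDSO32/64_SYMBOL() lookup and hands the range
 * to the matching do_*_fixups() helper, the same patchers used for the
 * kernel's own feature sections.
 */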

static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the systemcfg page.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (unsigned long)&sys_ni_syscall)
			vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}
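/*
 * The map is an array of 32-bit words, most-significant bit first: the
 * bit for syscall number n lives in word n >> 5, under the mask
 * 0x80000000 >> (n & 0x1f). Syscall 0, for example, is the top bit of
 * word 0.
 */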

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits. The VDSO uses this to implement getcpu().
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}
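/*
 * The vDSO side can then recover both values cheaply:
 *	cpu  = val & 0xffff;
 *	node = (val >> 16) & 0xffff;
 */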
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif

static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}
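/*
 * kcalloc() zeroes the allocation and one extra slot is requested, so
 * the returned array is NULL-terminated, marking the end of the .pages
 * list for the special-mapping code that walks it.
 */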

static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility.
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old platform number for pSeries and add
	 * in the LPAR bit if necessary.
	 */
	vdso_data->platform = 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = memblock_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.l1d.size;
	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
	vdso_data->icache_size = ppc64_caches.l1i.size;
	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

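	/*
	 * Presumably orders the stores above (the data page contents and
	 * the .pages arrays) before any subsequent consumers see them.
	 */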
	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);