// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
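
/*
 * For reference, a legacy (pre-vDSO) binary reached vsyscall entry 0
 * roughly like the sketch below: it simply called the fixed address.
 * The three entries sit at VSYSCALL_ADDR (0xffffffffff600000) plus
 * 0x0 (gettimeofday), 0x400 (time) and 0x800 (getcpu).
 *
 *	int (*vgtod)(struct timeval *, struct timezone *) =
 *		(void *)0xffffffffff600000;
 *	struct timeval tv;
 *	vgtod(&tv, NULL);
 *
 * With emulation, that call faults and lands in emulate_vsyscall().
 */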

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#include <asm/paravirt.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init =
#ifdef CONFIG_LEGACY_VSYSCALL_NONE
	NONE;
#elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
	XONLY;
#else
	EMULATE;
#endif

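/*
 * The Kconfig default above can be overridden at boot with
 * "vsyscall=emulate", "vsyscall=xonly" or "vsyscall=none".
 */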
static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("xonly", str))
			vsyscall_mode = XONLY;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	if (!show_unhandled_signals)
		return;

	printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
			   level, current->comm, task_pid_nr(current),
			   message, regs->ip, regs->cs,
			   regs->sp, regs->ax, regs->si, regs->di);
}

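/*
 * The vsyscall entries are 1024 bytes apart within the single vsyscall
 * page, so bits 11:10 of a properly aligned call address encode the
 * vsyscall number; all other address bits must match VSYSCALL_ADDR
 * exactly.
 */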
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_err, this could go away.
	 */

	if (!access_ok((void __user *)ptr, size)) {
		struct thread_struct *thread = &current->thread;

		thread->error_code	= X86_PF_USER | X86_PF_WRITE;
		thread->cr2		= ptr;
		thread->trap_nr		= X86_TRAP_PF;

		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr);
		return false;
	} else {
		return true;
	}
}

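/*
 * Called from the page fault path when a user-mode fault hits the
 * vsyscall page.  Returns true if the fault was consumed here (the
 * register state then reflects the emulated call or a signal has been
 * raised), false to let normal fault handling continue.
 */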
bool emulate_vsyscall(unsigned long error_code,
		      struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr, syscall_nr, tmp;
	int prev_sig_on_uaccess_err;
	long ret;
	unsigned long orig_dx;

	/* Write faults or kernel-privilege faults never get fixed up. */
	if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
		return false;

	if (!(error_code & X86_PF_INSTR)) {
		/* Failed vsyscall read */
		if (vsyscall_mode == EMULATE)
			return false;

		/*
		 * User code tried and failed to read the vsyscall page.
		 */
		warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
		return false;
	}

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

	tsk = current;

	/*
	 * Check for access_ok violations and find the syscall nr.
	 *
	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, NULL means "don't write anything" not "write it at
	 * address 0".
	 */
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_gettimeofday;
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_time;
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
			ret = -EFAULT;
			goto check_fault;
		}

		syscall_nr = __NR_getcpu;
		break;
	}

	/*
	 * Handle seccomp.  regs->ip must be the original value.
	 * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst.
	 *
	 * We could optimize the seccomp disabled case, but performance
	 * here doesn't matter.
	 */
	regs->orig_ax = syscall_nr;
	regs->ax = -ENOSYS;
	tmp = secure_computing(NULL);
	if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
		warn_bad_vsyscall(KERN_DEBUG, regs,
				  "seccomp tried to change syscall nr or ip");
		do_exit(SIGSYS);
	}
	regs->orig_ax = -1;
	if (tmp)
		goto do_ret;  /* skip requested */

	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
	current->thread.sig_on_uaccess_err = 1;

	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		/* this decodes regs->di and regs->si on its own */
		ret = __x64_sys_gettimeofday(regs);
		break;

	case 1:
		/* this decodes regs->di on its own */
		ret = __x64_sys_time(regs);
		break;

	case 2:
		/* while we could clobber regs->dx, we didn't in the past... */
		orig_dx = regs->dx;
		regs->dx = 0;
		/* this decodes regs->di, regs->si and regs->dx on its own */
		ret = __x64_sys_getcpu(regs);
		regs->dx = orig_dx;
		break;
	}

	current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true;  /* Don't emulate the ret. */
	}

	regs->ax = ret;

do_ret:
	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;
	return true;

sigsegv:
	force_sig(SIGSEGV);
	return true;
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	return "[vsyscall]";
}
static const struct vm_operations_struct gate_vma_ops = {
	.name = gate_vma_name,
};
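/*
 * This is what shows up as "[vsyscall]" in /proc/<pid>/maps; the VMA is
 * never actually linked into any mm, it only exists for ptrace and
 * /proc reporting via get_gate_vma().
 */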
static struct vm_area_struct gate_vma __ro_after_init = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	if (vsyscall_mode == NONE)
		return NULL;
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

	/*
	 * For full emulation, the page needs to exist for real.  In
	 * execute-only mode, there is no PTE at all backing the vsyscall
	 * page.
	 */
	if (vsyscall_mode == EMULATE) {
		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
			     PAGE_KERNEL_VVAR);
		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
	}

	if (vsyscall_mode == XONLY)
		gate_vma.vm_flags = VM_EXEC;

	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
		     (unsigned long)VSYSCALL_ADDR);
}
398