/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirsky <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper: define __GFP_NOTRACK away on kernels that lack it */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4))
			goto autosel;
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
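
/*
 * For reference, a summary of the boot options parsed above (on XEN PV
 * the function returns before any of them are looked at):
 *
 *   pti=off / nopti     - PTI disabled
 *   pti=on              - PTI force-enabled, even on CPUs without
 *                         X86_BUG_CPU_MELTDOWN
 *   pti=auto or absent  - PTI enabled only on affected CPUs
 *   mitigations=off     - PTI disabled along with other mitigations,
 *                         via cpu_mitigations_off()
 */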

pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
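
/*
 * A minimal sketch of the expected caller, assuming the usual PTI glue
 * (cf. pti_set_user_pgd() in arch/x86/include/asm/pgtable_64.h): the
 * wrapper skips the user-table update entirely when PTI is compiled in
 * but not enabled on this CPU.
 *
 *	static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 *	{
 *		if (!static_cpu_has(X86_FEATURE_PTI))
 *			return pgd;
 *		return __pti_set_user_pgd(pgdp, pgd);
 *	}
 */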

/*
 * Walk the user copy of the page tables, allocating page table pages
 * on the way down as needed.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables, allocating page table pages
 * on the way down as needed.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables, allocating page table pages
 * on the way down as needed.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

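/*
 * Share the kernel's vsyscall page with the user page tables. The page
 * sits in the kernel half of the address space, so without this clone
 * (plus the _PAGE_USER bits set by set_vsyscall_pgtable_user_bits())
 * legacy vsyscall users would fault once PTI hides the kernel mappings.
 */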
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

static void __init
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check: addr wrapped past the top of the address space */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Copy the PMD.  That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}
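
/*
 * Note: the clone loop above advances in PMD_SIZE steps; with 4k base
 * pages on x86-64 that is one 2MB-aligned region per iteration, whether
 * the region is mapped with a large page or a PTE table.
 */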

/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 * This area holds the per-CPU structures (entry stacks, TSS, GDT, ...)
 * that must remain mapped while running on the user CR3.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force
 * them RO (and non-global) in the user copy.
 */
static void __init pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
		       (unsigned long) __irqentry_text_end,
		       _PAGE_RW | _PAGE_GLOBAL);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

	pti_clone_user_shared();
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}
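
/*
 * A usage note, assuming the usual boot flow for this generation of the
 * kernel: pti_check_boottime_disable() runs early in boot and decides
 * whether to force X86_FEATURE_PTI; pti_init() then runs once from
 * mm_init() in init/main.c (after espfix64 setup) and is a no-op unless
 * the feature bit was set.
 */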