// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016  Yinghai Lu
 * Copyright (C)      2016  Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP

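/* Returns the physical address of the kernel command line (from boot_params). */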
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	/* Consumed more tables than expected? */
	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
		debug_putstr("pgt_buf running low in " __FILE__ "\n");
		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

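/*
 * Mask of valid physical address bits; initialize_identity_maps() clears
 * the SME encryption bit from this so the C-bit is not treated as part of
 * the physical address.
 */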
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time not build time.
 */
static struct x86_mapping_info mapping_info;

/*
 * Adds the specified range to the identity mappings.
 */
static void add_identity_map(unsigned long start, unsigned long end)
{
	int ret;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
	if (ret)
		error("Error: kernel_ident_mapping_init() failed\n");
}

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
	unsigned long cmdline;

	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, we use '_pgtable' to allocate the p4d page
	 * table; the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}

	/*
	 * New page-table is set up - map the kernel image, boot_params and the
	 * command line. The uncompressed kernel requires boot_params and the
	 * command line to be mapped in the identity mapping. Map them
	 * explicitly here in case the compressed kernel does not touch them,
	 * or does not touch all the pages covering them.
	 */
	add_identity_map((unsigned long)_head, (unsigned long)_end);
	boot_params = rmode;
	add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
	cmdline = get_cmd_line_ptr();
	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

	/* Load the new page-table. */
	sev_verify_cbit(top_level_pgt);
	write_cr3(top_level_pgt);
}

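/*
 * Splits a large (2M) PMD mapping into a page of 4K PTEs so attributes can
 * be changed on a single page, and returns a pointer to the PTE covering
 * '__address'.
 */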
static pte_t *split_large_pmd(struct x86_mapping_info *info,
			      pmd_t *pmdp, unsigned long __address)
{
	unsigned long page_flags;
	unsigned long address;
	pte_t *pte;
	pmd_t pmd;
	int i;

	pte = (pte_t *)info->alloc_pgt_page(info->context);
	if (!pte)
		return NULL;

	address     = __address & PMD_MASK;
	/* No large page - clear PSE flag */
	page_flags  = info->page_flag & ~_PAGE_PSE;

	/* Populate the PTEs */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pte(&pte[i], __pte(address | page_flags));
		address += PAGE_SIZE;
	}

	/*
	 * Ideally we need to clear the large PMD first and do a TLB
	 * flush before we write the new PMD. But the 2M range of the
	 * PMD might contain the code we execute and/or the stack
	 * we are on, so we can't do that. But that should be safe here
	 * because we are going from large to small mappings and we are
	 * also the only user of the page-table, so there is no chance
	 * of a TLB multihit.
	 */
	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
	set_pmd(pmdp, pmd);
	/* Flush TLB to establish the new PMD */
	write_cr3(top_level_pgt);

	return pte + pte_index(__address);
}

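/* Flushes all cache lines of the 4K page containing 'address'. */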
static void clflush_page(unsigned long address)
{
	unsigned int flush_size;
	char *cl, *start, *end;

	/*
	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
	 * cause another #VC exception and the GHCB is not ready to use yet.
	 */
	flush_size = 64;
	start      = (char *)(address & PAGE_MASK);
	end        = start + PAGE_SIZE;

	/*
	 * First make sure there are no pending writes on the cache-lines to
	 * flush.
	 */
	asm volatile("mfence" : : : "memory");

	for (cl = start; cl != end; cl += flush_size)
		clflush(cl);
}

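/*
 * Walks the page tables for 'address' (splitting a large PMD into 4K PTEs
 * if necessary) and sets/clears the given flags in the covering PTE.
 */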
static int set_clr_page_flags(struct x86_mapping_info *info,
			      unsigned long address,
			      pteval_t set, pteval_t clr)
{
	pgd_t *pgdp = (pgd_t *)top_level_pgt;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;

	/*
	 * First make sure there is a PMD mapping for 'address'.
	 * It should already exist, but keep things generic.
	 *
	 * To map the page just read from it and fault it in if there is no
	 * mapping yet. add_identity_map() can't be called here because that
	 * would unconditionally map the address on PMD level, destroying any
	 * PTE-level mappings that might already exist. Use assembly here so
	 * the access won't be optimized away.
	 */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");

	/*
	 * The page is mapped at least with PMD size - so skip checks and walk
	 * directly to the PMD.
	 */
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);

	if (pmd_large(*pmdp))
		ptep = split_large_pmd(info, pmdp, address);
	else
		ptep = pte_offset_kernel(pmdp, address);

	if (!ptep)
		return -ENOMEM;

	/*
	 * Changing the encryption attributes of a page requires flushing it
	 * from the caches.
	 */
	if ((set | clr) & _PAGE_ENC)
		clflush_page(address);

	/* Update PTE */
	pte = *ptep;
	pte = pte_set_flags(pte, set);
	pte = pte_clear_flags(pte, clr);
	set_pte(ptep, pte);

	/* Flush TLB after changing encryption attribute */
	write_cr3(top_level_pgt);

	return 0;
}

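/*
 * Wrappers to change the encryption or presence attribute of a single 4K
 * page - used e.g. by the SEV-ES code to share the GHCB page with the
 * hypervisor.
 */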
int set_page_decrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}

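/* Prints page-fault diagnostics and halts via error(). */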
static void do_pf_error(const char *msg, unsigned long error_code,
			unsigned long address, unsigned long ip)
{
	error_putstr(msg);

	error_putstr("\nError Code: ");
	error_puthex(error_code);
	error_putstr("\nCR2: 0x");
	error_puthex(address);
	error_putstr("\nRIP relative to _head: 0x");
	error_puthex(ip - (unsigned long)_head);
	error_putstr("\n");

	error("Stopping.\n");
}

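/*
 * Page-fault handler for the decompression stage: faults on not-yet-mapped
 * addresses are resolved by identity-mapping the surrounding 2M region on
 * demand.
 */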
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = native_read_cr2();
	unsigned long end;
	bool ghcb_fault;

	ghcb_fault = sev_es_check_ghcb_fault(address);

	address   &= PMD_MASK;
	end        = address + PMD_SIZE;

	/*
	 * Check for unexpected error codes. Unexpected are:
	 *	- Faults on present pages
	 *	- User faults
	 *	- Reserved bits set
	 */
	if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
		do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
	else if (ghcb_fault)
		do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

	/*
	 * Error code is sane - now identity map the 2M region around
	 * the faulting address.
	 */
	add_identity_map(address, end);
}

void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
{
	/* Empty handler to ignore NMI during early boot */
}