// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016 Yinghai Lu
 * Copyright (C) 2016 Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <asm/init.h>
#include <asm/pgtable.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
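
/*
 * Note: pulling in ident_map.c as a #include compiles
 * kernel_ident_mapping_init() and struct x86_mapping_info directly into
 * this standalone decompressor image, which cannot link against the
 * kernel proper.
 */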

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};
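
/*
 * pgt_buf points into the '_pgtable' scratch region set up by the boot
 * stub (see initialize_identity_maps() below); pgt_buf_offset advances
 * in PAGE_SIZE steps as alloc_pgt_page() hands pages out of that buffer.
 */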

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}
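
/*
 * kernel_ident_mapping_init() calls back here via
 * mapping_info.alloc_pgt_page whenever it needs a fresh page for a new
 * level of the hierarchy; a NULL return makes it fail the mapping with
 * -ENOMEM rather than crash.
 */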

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

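/*
 * Start with all physical address bits set; the SME encryption bit is
 * cleared from this mask in initialize_identity_maps() below, so that the
 * C-bit is treated as a page attribute rather than part of the address.
 */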
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time, not build time.
 */
static struct x86_mapping_info mapping_info;

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
	/* If running as an SEV guest, the encryption mask is required. */
	set_sev_encryption_mask();

	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
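	/*
	 * OR the encryption mask into the leaf page flags so that, under
	 * SME/SEV, the identity-mapped pages are mapped encrypted.
	 */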
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, we use '_pgtable' to allocate the p4d page
	 * table; the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) covers both the 4- and 5-level
	 * cases: on 4-level paging it is equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		debug_putstr("booted via startup_32()\n");
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		debug_putstr("booted via startup_64()\n");
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}
}

/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
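	/*
	 * For example, a request for [0x1f0000, 0x1f8000) is widened to
	 * the single 2M-aligned range [0x000000, 0x200000).
	 */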
	if (start >= end)
		return;

	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
				  start, end);
}

/*
 * This switches to the new top-level page table that has been built up
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op: cr3 already points at top_level_pgt
 * (though the write still flushes the TLB).
 */
void finalize_identity_maps(void)
{
	write_cr3(top_level_pgt);
}
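
/*
 * Typical call sequence, as driven by the KASLR code in
 * arch/x86/boot/compressed/kaslr.c (a sketch, not the verbatim caller):
 *
 *	initialize_identity_maps();
 *	add_identity_map(random_addr, output_size);
 *	finalize_identity_maps();
 */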