/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/kaslr.h>

/*
 * These types exist to take advantage of C type-checking.
 */
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long p4dval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;

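/*
 * Illustrative sketch (not part of this header): the struct wrappers make
 * the compiler reject accidental mixing of raw values and entries:
 *
 *	pteval_t raw = 0;
 *	pte_t pte = raw;		// build error: incompatible types
 *	pte_t pte2 = { .pte = raw };	// explicit wrapping required
 */
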
extern unsigned int __pgtable_l5_enabled;

#ifdef CONFIG_X86_5LEVEL
#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use the variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */

#else
#define pgtable_l5_enabled() 0
#endif /* CONFIG_X86_5LEVEL */
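
/*
 * Illustrative usage sketch: early boot code opts into the variable-based
 * check by defining USE_EARLY_PGTABLE_L5 before any includes, roughly:
 *
 *	#define USE_EARLY_PGTABLE_L5
 *	#include <asm/pgtable_64_types.h>
 *
 *	if (pgtable_l5_enabled())	// reads __pgtable_l5_enabled
 *		setup_5level_mappings();	// hypothetical caller
 */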

#define ARCH_PAGE_TABLE_SYNC_MASK \
	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
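
/*
 * Note: ARCH_PAGE_TABLE_SYNC_MASK tells generic mm code which page table
 * level, when modified for a kernel mapping, must be propagated to every
 * page table in the system: the pgd with 5-level paging, otherwise the
 * p4d, which is the effective top level once the extra level is folded.
 */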

extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

#endif /* !__ASSEMBLY__ */

#define SHARED_KERNEL_PMD 0

#ifdef CONFIG_X86_5LEVEL

/*
 * PGDIR_SHIFT determines the size of the area a top-level page table
 * entry can map.
 */
#define PGDIR_SHIFT pgdir_shift
#define PTRS_PER_PGD 512

/*
 * 4th level page tables in the 5-level paging case
 */
#define P4D_SHIFT 39
#define MAX_PTRS_PER_P4D 512
#define PTRS_PER_P4D ptrs_per_p4d
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))

#define MAX_POSSIBLE_PHYSMEM_BITS 52

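/*
 * For reference (assuming 4K pages): with 5-level paging, pgdir_shift is
 * 48 at runtime, so each of the 512 pgd entries maps 256 TiB, while each
 * p4d entry maps P4D_SIZE = 1UL << 39 = 512 GiB.
 */
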
#else /* CONFIG_X86_5LEVEL */

/*
 * PGDIR_SHIFT determines the size of the area a top-level page table
 * entry can map.
 */
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512
#define MAX_PTRS_PER_P4D 1

#endif /* CONFIG_X86_5LEVEL */

/*
 * 3rd level page tables
 */
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map.
 */
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE 512

#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
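
/*
 * Worked example (illustrative): with the shifts above,
 *
 *	PMD_SIZE   = 1UL << 21 = 2 MiB
 *	PUD_SIZE   = 1UL << 30 = 1 GiB
 *	PGDIR_SIZE = 1UL << 39 = 512 GiB	(4-level paging)
 *
 * and each mask rounds an address down to the matching boundary:
 *
 *	start = addr & PMD_MASK;	// 2 MiB region containing addr
 */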

/*
 * See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful about KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which is
 * correct as KASAN disables KASLR.
 */
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)

#define GUARD_HOLE_PGD_ENTRY -256UL
#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
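
/*
 * Worked example (4-level paging, PGDIR_SHIFT == 39):
 *
 *	GUARD_HOLE_BASE_ADDR = -256UL << 39 = 0xffff800000000000
 *	GUARD_HOLE_SIZE      = 16UL << 39   = 8 TiB
 *	GUARD_HOLE_END_ADDR  = 0xffff880000000000
 *
 * This is the guard hole reserved for hypervisors that is described in
 * Documentation/arch/x86/x86_64/mm.rst.
 */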

#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
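
/*
 * Worked example (4-level paging): LDT_BASE_ADDR = -240UL << 39 =
 * 0xffff880000000000, i.e. one full pgd entry (512 GiB) directly above
 * the guard hole is reserved for the PTI LDT remap.
 */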

#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL

#define VMALLOC_SIZE_TB_L4 32UL
#define VMALLOC_SIZE_TB_L5 12800UL

#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
#define __VMEMMAP_BASE_L5 0xffd4000000000000UL

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
# define VMALLOC_START vmalloc_base
# define VMALLOC_SIZE_TB (pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
# define VMEMMAP_START vmemmap_base
#else
# define VMALLOC_START __VMALLOC_BASE_L4
# define VMALLOC_SIZE_TB VMALLOC_SIZE_TB_L4
# define VMEMMAP_START __VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
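
/*
 * With CONFIG_DYNAMIC_MEMORY_LAYOUT the bases live in variables
 * (vmalloc_base and vmemmap_base, declared via <asm/kaslr.h> above) so
 * that memory layout randomization can shift them at boot; otherwise they
 * collapse to the compile-time 4-level constants. Consumers must treat
 * VMALLOC_START as a runtime value, e.g.:
 *
 *	if (addr >= VMALLOC_START && addr <= VMALLOC_END)
 *		;	// addr lies in the vmalloc area
 */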

#ifdef CONFIG_RANDOMIZE_MEMORY
# define PHYSMEM_END physmem_end
#endif

/*
 * End of the region for which vmalloc page tables are pre-allocated.
 * For non-KMSAN builds, this is the same as VMALLOC_END.
 * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
 * VMALLOC_START..VMALLOC_END (see below).
 */
#define VMEMORY_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
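
/*
 * Worked example (4-level paging, no KASLR): VMALLOC_SIZE_TB is 32, so
 *
 *	VMEMORY_END = 0xffffc90000000000 + (32UL << 40) - 1
 *	            = 0xffffe8ffffffffff
 */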

#ifndef CONFIG_KMSAN
#define VMALLOC_END VMEMORY_END
#else
/*
 * In KMSAN builds the vmalloc area is four times smaller, and the remaining
 * 3/4 are used to keep the metadata for virtual pages. The memory formerly
 * belonging to the vmalloc area is now laid out as follows:
 *
 * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
 * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
 *              VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
 * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
 *              VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
 * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
 *              - shadow for modules,
 *              KMSAN_MODULES_ORIGIN_START to
 *              KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
 */
#define VMALLOC_QUARTER_SIZE ((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)

/*
 * vmalloc metadata addresses are calculated by adding shadow/origin offsets
 * to the vmalloc address.
 */
#define KMSAN_VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
#define KMSAN_VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE << 1)

#define KMSAN_VMALLOC_SHADOW_START (VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
#define KMSAN_VMALLOC_ORIGIN_START (VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)

/*
 * The shadow and origin regions for modules are placed back to back in the
 * last quarter of the vmalloc space.
 */
#define KMSAN_MODULES_SHADOW_START (VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
#define KMSAN_MODULES_ORIGIN_START (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
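
/*
 * Worked example (4-level paging, no KASLR): the 32 TiB vmalloc range is
 * split into four 8 TiB quarters:
 *
 *	VMALLOC_START              = 0xffffc90000000000
 *	VMALLOC_END                = 0xffffd0ffffffffff
 *	KMSAN_VMALLOC_SHADOW_START = 0xffffd10000000000
 *	KMSAN_VMALLOC_ORIGIN_START = 0xffffd90000000000
 *	KMSAN_MODULES_SHADOW_START = 0xffffe10000000000
 */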
#endif /* CONFIG_KMSAN */

#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module region ends at the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
# define MODULES_END _AC(0xffffffffff000000, UL)
#else
# define MODULES_END _AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
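
/*
 * For scale (illustrative, no KASLR): __START_KERNEL_map is
 * 0xffffffff80000000 and KERNEL_IMAGE_SIZE is typically 512 MiB
 * (1 GiB with CONFIG_RANDOMIZE_BASE), giving:
 *
 *	MODULES_VADDR = 0xffffffffa0000000
 *	MODULES_LEN   = 0xffffffffff000000 - 0xffffffffa0000000
 *	              = 0x5f000000 (1520 MiB)
 */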

#define ESPFIX_PGD_ENTRY _AC(-2, UL)
#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)

#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
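
/*
 * Note: both regions are placed via the constant P4D_SHIFT (39) rather
 * than PGDIR_SHIFT, so the addresses are identical under 4- and 5-level
 * paging:
 *
 *	ESPFIX_BASE_ADDR    = -2 << 39 = 0xffffff0000000000
 *	CPU_ENTRY_AREA_BASE = -4 << 39 = 0xfffffe0000000000
 */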

#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
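
/*
 * Worked example: EFI_VA_START = -4 GiB = 0xffffffff00000000 and
 * EFI_VA_END = -68 GiB = 0xffffffef00000000. EFI runtime mappings occupy
 * the 64 GiB in between and are handed out top-down, hence START > END.
 */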

#define EARLY_DYNAMIC_PAGE_TABLES 64

#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
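
/*
 * Worked example: with 4K pages and 8-byte pgd entries,
 * PGD_KERNEL_START = (4096 / 2) / 8 = 256, i.e. kernel mappings occupy
 * the upper half of the pgd, starting at index 256.
 */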

/*
 * We borrow bit 3 to remember PG_anon_exclusive.
 */
#define _PAGE_SWP_EXCLUSIVE _PAGE_PWT
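
/*
 * This borrowing works because swap entries are non-present: the CPU
 * never interprets _PAGE_PWT (bit 3) in a non-present entry, so the bit
 * is free to carry software state.
 */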

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */