#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_HIDDEN	11	/* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY
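
/*
 * Illustrative note on how these overloaded bits are consumed (a sketch
 * only; the authoritative helpers live in <asm/pgtable.h> and may differ
 * in detail): a PROT_NONE mapping keeps _PAGE_PROTNONE set while
 * _PAGE_PRESENT is clear, and pte_present() still reports it as present
 * because it tests both bits, roughly
 *
 *	pte_flags(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)
 */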

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * _PAGE_NUMA indicates that this page will trigger a numa hinting
 * minor page fault to gather numa placement statistics (see
 * pte_numa()). The bit picked (8) is within the range between
 * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
 * require changes to the swp entry format because that bit is always
 * zero when the pte is not present.
 *
 * The bit picked must be zero both when the pmd is present and when it
 * is not present, so that we don't lose information when we set it
 * while atomically clearing the present bit.
 *
 * Because we share the same bit (8) with _PAGE_PROTNONE, this can be
 * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
 * couldn't reach, like handle_mm_fault() (see access_error() in
 * arch/x86/mm/fault.c: the vma protection must not be PROT_NONE for
 * handle_mm_fault() to be invoked).
 */
#define _PAGE_NUMA	_PAGE_PROTNONE
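
/*
 * Illustrative consumer (a sketch of the generic pte_numa() test; the
 * authoritative definition lives in asm-generic/pgtable.h and may
 * differ by version): an entry is a NUMA hinting entry when _PAGE_NUMA
 * is set while _PAGE_PRESENT is clear, i.e. roughly
 *
 *	(pte_flags(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA
 */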

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
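
/*
 * Background note (simplified; arch/x86/mm/pat.c owns the real setup):
 * for 4K mappings the PWT and PCD bits, together with the PAT bit,
 * index the PAT MSR, so _PAGE_CACHE_WC acts as write-combining only
 * because the kernel reprograms PAT entry 1 (PWT=1, PCD=0, PAT=0) away
 * from its power-on default of write-through.
 */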

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
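
/*
 * Note (behaviour lives outside this header): _PAGE_NX in the
 * protections above is only honoured when the CPU supports NX;
 * pte/pmd construction masks pgprot bits with __supported_pte_mask
 * (declared further down), so on hardware without NX the bit is
 * silently dropped rather than faulting.
 */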

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_VVAR_NOCACHE	(__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)
#define PAGE_KERNEL_VVAR_NOCACHE	__pgprot(__PAGE_KERNEL_VVAR_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
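
/*
 * Illustrative mapping (the generic code in mm/mmap.c owns the actual
 * protection_map[] table): __Pxxx entries cover private (MAP_PRIVATE)
 * mappings and __Sxxx shared ones, indexed by the xwr bits of the
 * requested protection. For example, a private PROT_READ|PROT_WRITE
 * mapping resolves to __P011 == PAGE_COPY, which leaves _PAGE_RW clear
 * so the first write faults and triggers copy-on-write.
 */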

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * PDE_IDENT_ATTR includes the USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high
 * address mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
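
/*
 * Example split (a sketch; the real accessors are pte_pfn(), pte_flags()
 * and friends in <asm/pgtable.h>): a raw entry value decomposes as
 *
 *	pfn   = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *	flags =  val & PTE_FLAGS_MASK;
 *
 * Note that PTE_FLAGS_MASK also covers the high NX bit on 64-bit/PAE.
 */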

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.pgd);
}
#endif

#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern phys_addr_t slow_virt_to_phys(void *__address);
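
/*
 * Typical usage (a sketch; real callers such as arch/x86/mm/pageattr.c
 * do more checking): 'level' reports which PG_LEVEL_* entry was found,
 * so a huge mapping must be detected before treating the result as a
 * 4K pte.
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && (pte_flags(*ptep) & _PAGE_PRESENT) &&
 *	    level == PG_LEVEL_4K)
 *		... addr is covered by the 4K mapping *ptep ...
 */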

#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */