/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP	_PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2	(_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3	(_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
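
/*
 * Illustrative sketch (not part of this header): the 4-bit protection
 * key occupies pte bits 59-62, so a pkey value is extracted from the
 * flags with a mask-and-shift; compare pte_flags_pkey() in
 * <asm/pgtable.h>:
 *
 *	int pkey = (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
 */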

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft-dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved
 * in swap entry computation, but bit 7 is used for THP migration,
 * so we borrow bit 1 (_PAGE_RW) for soft-dirty tracking.
 *
 * Please note that this bit must be treated as the swap soft-dirty
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
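
/*
 * Illustrative sketch of the borrow described above (hypothetical
 * helper; the real ones are pte_swp_mksoft_dirty() and friends in
 * <asm/pgtable.h>): marking a non-present (swap) PTE value soft-dirty
 * just sets bit 1, the bit that serves as _PAGE_RW when the PTE is
 * present:
 *
 *	static inline pteval_t example_swp_mksoft_dirty(pteval_t val)
 *	{
 *		return val | _PAGE_SWP_SOFT_DIRTY;
 *	}
 */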

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define _PAGE_UFFD_WP		(_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
#define _PAGE_SWP_UFFD_WP	_PAGE_USER
#else
#define _PAGE_UFFD_WP		(_AT(pteval_t, 0))
#define _PAGE_SWP_UFFD_WP	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#define _PAGE_DEVMAP	(_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * Set of bits not changed in pte_modify().  The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _COMMON_PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |	       \
				 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
				 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
				 _PAGE_UFFD_WP)
#define _PAGE_CHG_MASK	(_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
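
/*
 * A sketch of how pte_modify() in <asm/pgtable.h> applies these masks
 * (illustrative, simplified): bits inside _PAGE_CHG_MASK survive from
 * the old pte, everything else is taken from the new protection:
 *
 *	val = (pte_val(pte) & _PAGE_CHG_MASK) |
 *	      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 */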

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,
	_PAGE_CACHE_MODE_WC       = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC       = 3,
	_PAGE_CACHE_MODE_WT       = 4,
	_PAGE_CACHE_MODE_WP       = 5,

	_PAGE_CACHE_MODE_NUM      = 8
};
#endif

#define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))

#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_LARGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))

#define __PP _PAGE_PRESENT
#define __RW _PAGE_RW
#define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define ___D _PAGE_DIRTY
#define ___G _PAGE_GLOBAL
#define __NX _PAGE_NX

#define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE

#define pgprot_val(x)		((x).pgprot)
#define __pgprot(x)		((pgprot_t) { (x) } )
#define __pg(x)			__pgprot(x)

#define PAGE_NONE	     __pg(   0|   0|   0|___A|   0|   0|   0|___G)
#define PAGE_SHARED	     __pg(__PP|__RW|_USR|___A|__NX|   0|   0|   0)
#define PAGE_SHARED_EXEC     __pg(__PP|__RW|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY_NOEXEC     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_COPY_EXEC	     __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY	     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY	     __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY_EXEC   __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)

#define __PAGE_KERNEL		 (__PP|__RW|   0|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_EXEC	 (__PP|__RW|   0|___A|   0|___D|   0|___G)
#define _KERNPG_TABLE_NOENC	 (__PP|__RW|   0|___A|   0|___D|   0|   0)
#define _KERNPG_TABLE		 (__PP|__RW|   0|___A|   0|___D|   0|   0| _ENC)
#define _PAGE_TABLE_NOENC	 (__PP|__RW|_USR|___A|   0|___D|   0|   0)
#define _PAGE_TABLE		 (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
#define __PAGE_KERNEL_RO	 (__PP|   0|   0|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_ROX	 (__PP|   0|   0|___A|   0|___D|   0|___G)
#define __PAGE_KERNEL_NOCACHE	 (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
#define __PAGE_KERNEL_VVAR	 (__PP|   0|_USR|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_LARGE	 (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW|   0|___A|   0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP	 (__PP|__RW|   0|___A|__NX|___D|   0|___G| __WP)

#define __PAGE_KERNEL_IO		__PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE	__PAGE_KERNEL_NOCACHE

#ifndef __ASSEMBLY__

#define __PAGE_KERNEL_ENC	(__PAGE_KERNEL    | _ENC)
#define __PAGE_KERNEL_ENC_WP	(__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC	(__PAGE_KERNEL    |    0)
#define __PAGE_KERNEL_NOENC_WP	(__PAGE_KERNEL_WP |    0)

#define __pgprot_mask(x)	__pgprot((x) & __default_kernel_pte_mask)

#define PAGE_KERNEL		__pgprot_mask(__PAGE_KERNEL            | _ENC)
#define PAGE_KERNEL_NOENC	__pgprot_mask(__PAGE_KERNEL            |    0)
#define PAGE_KERNEL_RO		__pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
#define PAGE_KERNEL_EXEC	__pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
#define PAGE_KERNEL_EXEC_NOENC	__pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
#define PAGE_KERNEL_ROX		__pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
#define PAGE_KERNEL_NOCACHE	__pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
#define PAGE_KERNEL_LARGE	__pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR	__pgprot_mask(__PAGE_KERNEL_VVAR       | _ENC)

#define PAGE_KERNEL_IO		__pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE	__pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)

#endif	/* __ASSEMBLY__ */

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
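
/*
 * These tables feed the generic mm code: historically the
 * protection_map[] array (mm/mmap.c) is built from __P000..__S111 and
 * indexed by the low VM_READ|VM_WRITE|VM_EXEC|VM_SHARED vm_flags bits,
 * roughly (sketch, not literal upstream code):
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */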

/*
 * Early identity-mapping pte attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t.
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
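
/*
 * Example split of a 4KB pte value into its two halves (sketch; the
 * real accessors are pte_pfn() and pte_flags()):
 *
 *	unsigned long pfn   = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *	pteval_t      flags =  val & PTE_FLAGS_MASK;
 */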

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) | _PAGE_NX);
}
#define pgprot_nx pgprot_nx

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
 * use it here.
 */

#define PGD_PAE_PAGE_MASK	((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK	(((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are Reserved MBZ.
 */
#define PGD_ALLOWED_BITS	(PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
				 _PAGE_PWT | _PAGE_PCD | \
				 _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS	(~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}
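
/*
 * The PSE-dependent mask matters when extracting the PFN of a
 * potentially huge entry; a sketch in the style of pud_pfn() from
 * <asm/pgtable.h>:
 *
 *	pfn = (native_pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
 */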

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
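
/*
 * Worked example of the swizzle above: cache mode index 2
 * (_PAGE_CACHE_MODE_UC_MINUS, binary 010) maps to the PCD bit alone,
 * and the conversion round-trips:
 *
 *	__cm_idx2pte(2)          == _PAGE_PCD
 *	__pte2cm_idx(_PAGE_PCD)  == 2
 */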

unsigned long cachemode2protval(enum page_cache_mode pcm);

static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}

static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}

static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}

static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
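
/*
 * Usage sketch (hypothetical call site): a protection value computed
 * for 4KB pages must have its PAT bit moved from bit 7 to bit 12
 * before it is installed in a 2MB/1GB entry, since bit 7 is PSE there:
 *
 *	pgprot_t pmd_prot = pgprot_4k_2_large(prot);
 *	set_pmd(pmdp, pfn_pmd(pfn, pmd_prot));
 */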

typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
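
/*
 * Usage sketch for the lookup helpers (illustrative): 'level' reports
 * which pg_level the returned entry lives at, so a 2MB mapping hands
 * back the PSE pmd entry cast as a pte:
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && pte_present(*ptep) && level == PG_LEVEL_2M)
 *		...the entry covers a 2MB range...
 */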

struct mm_struct;
extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
				   unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */