#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))
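
/*
 * Illustrative only, with made-up values: pmd_ERROR() would print a
 * line such as
 *
 *	mm/memory.c:412: bad pmd ffff880012345678(ffff8800deadf047)
 *
 * i.e. file:line of the caller, the address of the entry, and its raw
 * 64-bit value.
 */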

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);


static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}
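
/*
 * A note on the WRITE_ONCE() in the setters above and below; this is a
 * minimal sketch of the hazard avoided, not a claim about any specific
 * compiler. A plain assignment may legally be split into several
 * smaller stores (store tearing), so a lockless reader could observe a
 * half-written entry:
 *
 *	*ptep = pte;              // compiler may emit multiple stores
 *	WRITE_ONCE(*ptep, pte);   // volatile access: one full-width store
 *
 * Page-table entries are read locklessly (by the hardware walker and
 * by fast GUP, for example), so every update must appear atomic.
 */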

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}
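
/*
 * Why xchg() is needed on SMP, as a sketch (hypothetical interleaving,
 * not taken from a real trace): get-and-clear must be a single atomic
 * operation, otherwise an Accessed/Dirty-bit update from another CPU
 * can be lost between the read and the clear:
 *
 *	CPU0: pte_t old = *xp;                // reads a clean entry
 *	CPU1: writes through the mapping; hardware sets _PAGE_DIRTY
 *	CPU0: native_pte_clear(NULL, 0, xp);  // dirty bit is discarded
 *
 * xchg() reads the old value and zeroes the slot in one locked
 * instruction, closing that window. On !SMP no other CPU exists, so
 * the plain read-then-clear fallback is sufficient.
 */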

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd);

static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp)
{
#ifdef CONFIG_DEBUG_VM
	/* linux/mmdebug.h may not have been included at this point */
	BUG_ON(!kaiser_enabled);
#endif
	return (pgd_t *)((unsigned long)pgdp | (unsigned long)PAGE_SIZE);
}
#else
static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp)
{
	BUILD_BUG_ON(1);
	return NULL;
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
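
/*
 * A worked example of the arithmetic in native_get_shadow_pgd(), with
 * a hypothetical address: under KAISER the kernel pgd is allocated as
 * a two-page pair, kernel copy first and shadow (user) copy in the
 * page immediately after it. The kernel copy is page-aligned, so bit
 * 12 of its address is clear and OR-ing in PAGE_SIZE (0x1000) selects
 * the shadow page:
 *
 *	pgdp             == 0xffff880012344000   (kernel pgd)
 *	pgdp | PAGE_SIZE == 0xffff880012345000   (shadow pgd)
 *
 * The same index then refers to the same virtual range in either copy,
 * which is what lets kaiser_set_shadow_pgd() mirror entries across.
 */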

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, kaiser_set_shadow_pgd(pgdp, pgd));
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end,
			     int removed);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
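
/*
 * A worked round trip through the macros above, using made-up numbers
 * (with _PAGE_BIT_PROTNONE == 8, SWP_OFFSET_FIRST_BIT == 9 and thus
 * SWP_OFFSET_SHIFT == 14):
 *
 *	type = 2, offset = 0x1234
 *
 *	__swp_entry(2, 0x1234):
 *	  ~0x1234 << 14 >> 5       == 0x07ffffffffdb9600
 *	  | (2UL << 59)            == 0x17ffffffffdb9600  == entry.val
 *
 *	__swp_type(entry):   0x17ffffffffdb9600 >> 59        == 2
 *	__swp_offset(entry): ~0x17ffffffffdb9600 << 5 >> 14  == 0x1234
 *
 * Note how the inversion leaves the unused offset bits set, so a swap
 * entry never carries a plausible physical address in the PTE (see
 * asm/pgtable-invert.h, included below, for the L1TF rationale).
 */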

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
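
/*
 * Sketch of the round trip, with an illustrative address and the
 * 4-level-paging value __VIRTUAL_MASK_SHIFT == 47: the macros strip
 * and restore the canonical sign-extension bits of a kernel vaddr.
 *
 *	v                     == 0xffff880000100000
 *	kc_vaddr_to_offset(v) == v & __VIRTUAL_MASK  == 0x0000080000100000
 *	kc_offset_to_vaddr(o) == o | ~__VIRTUAL_MASK == 0xffff880000100000
 */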

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */