/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

#include <asm/atomic64_32.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
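/*
 * Note on ordering: native_set_pte() below writes the high word before
 * the low word, so a lockless reader (see gup_get_pte() at the bottom of
 * this file) can only observe the present bit in pte_low once the
 * matching pte_high is already visible.
 */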
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a "*pmdp" dereference done by gcc. Problem is, in certain places
 * where pte_offset_map_lock() is called, concurrent page faults are
 * allowed, if the mmap_sem is held for reading. An example is mincore
 * vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64-bit value of the pmd atomically. To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs. So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic(). We could read it truly
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte). And we're returning a none
 * pmdval if the low part of the pmd is none. In some cases the high
 * and low parts of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
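
/*
 * Illustrative sketch only, not part of the original header: a reader
 * holding mmap_sem for read is expected to sample the pmd through
 * pmd_read_atomic() and test the snapshot, rather than dereferencing
 * *pmdp directly.  The helper name below is hypothetical, and pmd_none()
 * is assumed to be visible to the caller (as it is in mm code).
 */
static inline bool example_pmd_has_pte_table(pmd_t *pmdp)
{
	pmd_t pmdval = pmd_read_atomic(pmdp);

	/* a none pmd means there is no pte page to map and lock */
	return !pmd_none(pmdval);
}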

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
 * __swp_entry_to_pte() through the following helper macro based on 64bit
 * __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
	(~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)	((pte_t){ .pte = \
		__swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64bit to the 32bit
 * intermediate representation, using the following macros based on 64bit
 * __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
					     __pteval_swp_offset(pte)))
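
/*
 * Worked example (illustrative, not part of the original header):
 * assuming _PAGE_BIT_PROTNONE keeps its usual x86 value of 8,
 * SWP_OFFSET_FIRST_BIT is 9 and SWP_OFFSET_SHIFT is 14, so
 * __swp_pteval_entry() places the swap type in pte bits 59-63 and the
 * bit-inverted offset in pte bits 9-58, leaving the low bits up to and
 * including _PAGE_BIT_PROTNONE clear (in particular the present bit).
 * __pteval_swp_type() and __pteval_swp_offset() simply undo those shifts.
 */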

#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks. For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE. What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'. We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high. *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high. We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'. Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */