1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012 ARM Ltd.
4 */
5 #ifndef __ASM_PGTABLE_H
6 #define __ASM_PGTABLE_H
7
8 #include <asm/bug.h>
9 #include <asm/proc-fns.h>
10
11 #include <asm/memory.h>
12 #include <asm/mte.h>
13 #include <asm/pgtable-hwdef.h>
14 #include <asm/pgtable-prot.h>
15 #include <asm/tlbflush.h>
16
17 /*
18 * VMALLOC range.
19 *
20 * VMALLOC_START: beginning of the kernel vmalloc space
21 * VMALLOC_END: extends to the available space below vmemmap
22 */
23 #define VMALLOC_START (MODULES_END)
24 #if VA_BITS == VA_BITS_MIN
25 #define VMALLOC_END (VMEMMAP_START - SZ_8M)
26 #else
27 #define VMEMMAP_UNUSED_NPAGES ((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
28 #define VMALLOC_END (VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
29 #endif
30
31 #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
32
33 #ifndef __ASSEMBLY__
34
35 #include <asm/cmpxchg.h>
36 #include <asm/fixmap.h>
37 #include <asm/por.h>
38 #include <linux/mmdebug.h>
39 #include <linux/mm_types.h>
40 #include <linux/sched.h>
41 #include <linux/page_table_check.h>
42
43 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
44 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
45
46 /* Set stride and tlb_level in flush_*_tlb_range */
47 #define flush_pmd_tlb_range(vma, addr, end) \
48 __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
49 #define flush_pud_tlb_range(vma, addr, end) \
50 __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
51 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
52
53 /*
54 * Outside of a few very special situations (e.g. hibernation), we always
55 * use broadcast TLB invalidation instructions, therefore a spurious page
56 * fault on one CPU which has been handled concurrently by another CPU
57 * does not need to perform additional invalidation.
58 */
59 #define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
60
61 /*
62 * ZERO_PAGE is a global shared page that is always zero: used
63 * for zero-mapped memory areas etc..
64 */
65 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
66 #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
67
68 #define pte_ERROR(e) \
69 pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
70
71 /*
72 * Macros to convert between a physical address and its placement in a
73 * page table entry, taking care of 52-bit addresses.
74 */
75 #ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
77 {
78 pte_val(pte) &= ~PTE_MAYBE_SHARED;
79 return (pte_val(pte) & PTE_ADDR_LOW) |
80 ((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
81 }
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
83 {
84 return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
85 }
86 #else
87 #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
88 #define __phys_to_pte_val(phys) (phys)
89 #endif
90
91 #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
92 #define pfn_pte(pfn,prot) \
93 __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
94
95 #define pte_none(pte) (!pte_val(pte))
96 #define __pte_clear(mm, addr, ptep) \
97 __set_pte(ptep, __pte(0))
98 #define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
99
100 /*
101 * The following only work if pte_present(). Undefined behaviour otherwise.
102 */
103 #define pte_present(pte) (pte_valid(pte) || pte_present_invalid(pte))
104 #define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
105 #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
106 #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
107 #define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY))
108 #define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
109 #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
110 #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
111 #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
112 #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
113 PTE_ATTRINDX(MT_NORMAL_TAGGED))
114
115 #define pte_cont_addr_end(addr, end) \
116 ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
117 (__boundary - 1 < (end) - 1) ? __boundary : (end); \
118 })
119
120 #define pmd_cont_addr_end(addr, end) \
121 ({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \
122 (__boundary - 1 < (end) - 1) ? __boundary : (end); \
123 })
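/*
 * Worked example (illustrative only, assuming a 4K base page size, where
 * CONT_PTE_SIZE is 64K): pte_cont_addr_end(0x1234000, 0x1300000) rounds the
 * address up to the next contiguous-range boundary and returns 0x1240000,
 * since that boundary still lies below the end address.
 */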
124
125 #define pte_hw_dirty(pte) (pte_write(pte) && !pte_rdonly(pte))
126 #define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
127 #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
128
129 #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
130 #define pte_present_invalid(pte) \
131 ((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
132 /*
133 * Execute-only user mappings do not have the PTE_USER bit set. All valid
134 * kernel mappings have the PTE_UXN bit set.
135 */
136 #define pte_valid_not_user(pte) \
137 ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
138 /*
139 * Returns true if the pte is valid and has the contiguous bit set.
140 */
141 #define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte))
142 /*
143 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
144 * so that we don't erroneously return false for pages that have been
145 * remapped as PROT_NONE but are yet to be flushed from the TLB.
146 * Note that we can't make any assumptions based on the state of the access
147 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
148 * TLB.
149 */
150 #define pte_accessible(mm, pte) \
151 (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
152
static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
154 {
155 u64 por;
156
157 if (!system_supports_poe())
158 return true;
159
160 por = read_sysreg_s(SYS_POR_EL0);
161
162 if (write)
163 return por_elx_allows_write(por, pkey);
164
165 if (execute)
166 return por_elx_allows_exec(por, pkey);
167
168 return por_elx_allows_read(por, pkey);
169 }
170
171 /*
172 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, e.g. PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * not set), must return false. PROT_NONE mappings do not have the
176 * PTE_VALID bit set.
177 */
178 #define pte_access_permitted_no_overlay(pte, write) \
179 (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
180 #define pte_access_permitted(pte, write) \
181 (pte_access_permitted_no_overlay(pte, write) && \
182 por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
183 #define pmd_access_permitted(pmd, write) \
184 (pte_access_permitted(pmd_pte(pmd), (write)))
185 #define pud_access_permitted(pud, write) \
186 (pte_access_permitted(pud_pte(pud), (write)))
187
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
189 {
190 pte_val(pte) &= ~pgprot_val(prot);
191 return pte;
192 }
193
static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
195 {
196 pte_val(pte) |= pgprot_val(prot);
197 return pte;
198 }
199
static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
201 {
202 pmd_val(pmd) &= ~pgprot_val(prot);
203 return pmd;
204 }
205
static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
207 {
208 pmd_val(pmd) |= pgprot_val(prot);
209 return pmd;
210 }
211
static inline pte_t pte_mkwrite_novma(pte_t pte)
213 {
214 pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
215 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
216 return pte;
217 }
218
static inline pte_t pte_mkclean(pte_t pte)
220 {
221 pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
222 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
223
224 return pte;
225 }
226
static inline pte_t pte_mkdirty(pte_t pte)
228 {
229 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
230
231 if (pte_write(pte))
232 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
233
234 return pte;
235 }
236
static inline pte_t pte_wrprotect(pte_t pte)
238 {
239 /*
240 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
241 * clear), set the PTE_DIRTY bit.
242 */
243 if (pte_hw_dirty(pte))
244 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
245
246 pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
247 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
248 return pte;
249 }
250
static inline pte_t pte_mkold(pte_t pte)
252 {
253 return clear_pte_bit(pte, __pgprot(PTE_AF));
254 }
255
static inline pte_t pte_mkyoung(pte_t pte)
257 {
258 return set_pte_bit(pte, __pgprot(PTE_AF));
259 }
260
static inline pte_t pte_mkspecial(pte_t pte)
262 {
263 return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
264 }
265
static inline pte_t pte_mkcont(pte_t pte)
267 {
268 pte = set_pte_bit(pte, __pgprot(PTE_CONT));
269 return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
270 }
271
static inline pte_t pte_mknoncont(pte_t pte)
273 {
274 return clear_pte_bit(pte, __pgprot(PTE_CONT));
275 }
276
static inline pte_t pte_mkpresent(pte_t pte)
278 {
279 return set_pte_bit(pte, __pgprot(PTE_VALID));
280 }
281
static inline pte_t pte_mkinvalid(pte_t pte)
283 {
284 pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
285 pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
286 return pte;
287 }
288
static inline pmd_t pmd_mkcont(pmd_t pmd)
290 {
291 return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
292 }
293
static inline pte_t pte_mkdevmap(pte_t pte)
295 {
296 return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
297 }
298
299 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
301 {
302 return !!(pte_val(pte) & PTE_UFFD_WP);
303 }
304
static inline pte_t pte_mkuffd_wp(pte_t pte)
306 {
307 return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
308 }
309
static inline pte_t pte_clear_uffd_wp(pte_t pte)
311 {
312 return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
313 }
314 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
315
static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
317 {
318 WRITE_ONCE(*ptep, pte);
319 }
320
static inline void __set_pte(pte_t *ptep, pte_t pte)
322 {
323 __set_pte_nosync(ptep, pte);
324
325 /*
326 * Only if the new pte is valid and kernel, otherwise TLB maintenance
327 * or update_mmu_cache() have the necessary barriers.
328 */
329 if (pte_valid_not_user(pte)) {
330 dsb(ishst);
331 isb();
332 }
333 }
334
static inline pte_t __ptep_get(pte_t *ptep)
336 {
337 return READ_ONCE(*ptep);
338 }
339
340 extern void __sync_icache_dcache(pte_t pteval);
341 bool pgattr_change_is_safe(u64 old, u64 new);
342
343 /*
344 * PTE bits configuration in the presence of hardware Dirty Bit Management
345 * (PTE_WRITE == PTE_DBM):
346 *
347 * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw)
348 * 0 0 | 1 0 0
349 * 0 1 | 1 1 0
350 * 1 0 | 1 0 1
351 * 1 1 | 0 1 x
352 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
354 * the page fault mechanism. Checking the dirty status of a pte becomes:
355 *
356 * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
357 */
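/*
 * Illustrative sequence using only the helpers defined above (not a new
 * API): a writable pte made dirty by pte_mkdirty() encodes the dirty state
 * as hardware-dirty (PTE_RDONLY clear); a subsequent pte_wrprotect() first
 * latches that state into the software PTE_DIRTY bit before setting
 * PTE_RDONLY, so pte_dirty() still reports the entry as dirty.
 */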
358
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
360 pte_t pte)
361 {
362 pte_t old_pte;
363
364 if (!IS_ENABLED(CONFIG_DEBUG_VM))
365 return;
366
367 old_pte = __ptep_get(ptep);
368
369 if (!pte_valid(old_pte) || !pte_valid(pte))
370 return;
371 if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
372 return;
373
374 /*
375 * Check for potential race with hardware updates of the pte
376 * (__ptep_set_access_flags safely changes valid ptes without going
377 * through an invalid entry).
378 */
379 VM_WARN_ONCE(!pte_young(pte),
380 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
381 __func__, pte_val(old_pte), pte_val(pte));
382 VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
383 "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
384 __func__, pte_val(old_pte), pte_val(pte));
385 VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
386 "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
387 __func__, pte_val(old_pte), pte_val(pte));
388 }
389
static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
391 {
392 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
393 __sync_icache_dcache(pte);
394
395 /*
396 * If the PTE would provide user space access to the tags associated
397 * with it then ensure that the MTE tags are synchronised. Although
398 * pte_access_permitted_no_overlay() returns false for exec only
399 * mappings, they don't expose tags (instruction fetches don't check
400 * tags).
401 */
402 if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
403 !pte_special(pte) && pte_tagged(pte))
404 mte_sync_tags(pte, nr_pages);
405 }
406
407 /*
408 * Select all bits except the pfn
409 */
410 #define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
412 {
413 unsigned long pfn = pte_pfn(pte);
414
415 return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
416 }
417
418 #define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
420 {
421 return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
422 }
423
static inline void __set_ptes(struct mm_struct *mm,
425 unsigned long __always_unused addr,
426 pte_t *ptep, pte_t pte, unsigned int nr)
427 {
428 page_table_check_ptes_set(mm, ptep, pte, nr);
429 __sync_cache_and_tags(pte, nr);
430
431 for (;;) {
432 __check_safe_pte_update(mm, ptep, pte);
433 __set_pte(ptep, pte);
434 if (--nr == 0)
435 break;
436 ptep++;
437 pte = pte_advance_pfn(pte, 1);
438 }
439 }
440
441 /*
442 * Huge pte definitions.
443 */
444 #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
445
446 /*
447 * Hugetlb definitions.
448 */
449 #define HUGE_MAX_HSTATE 4
450 #define HPAGE_SHIFT PMD_SHIFT
451 #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
452 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
453 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
454
static inline pte_t pgd_pte(pgd_t pgd)
456 {
457 return __pte(pgd_val(pgd));
458 }
459
static inline pte_t p4d_pte(p4d_t p4d)
461 {
462 return __pte(p4d_val(p4d));
463 }
464
static inline pte_t pud_pte(pud_t pud)
466 {
467 return __pte(pud_val(pud));
468 }
469
static inline pud_t pte_pud(pte_t pte)
471 {
472 return __pud(pte_val(pte));
473 }
474
static inline pmd_t pud_pmd(pud_t pud)
476 {
477 return __pmd(pud_val(pud));
478 }
479
static inline pte_t pmd_pte(pmd_t pmd)
481 {
482 return __pte(pmd_val(pmd));
483 }
484
static inline pmd_t pte_pmd(pte_t pte)
486 {
487 return __pmd(pte_val(pte));
488 }
489
static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
491 {
492 return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
493 }
494
static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
496 {
497 return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
498 }
499
static inline pte_t pte_swp_mkexclusive(pte_t pte)
501 {
502 return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
503 }
504
static inline int pte_swp_exclusive(pte_t pte)
506 {
507 return pte_val(pte) & PTE_SWP_EXCLUSIVE;
508 }
509
static inline pte_t pte_swp_clear_exclusive(pte_t pte)
511 {
512 return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
513 }
514
515 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
517 {
518 return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
519 }
520
static inline int pte_swp_uffd_wp(pte_t pte)
522 {
523 return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
524 }
525
static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
527 {
528 return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
529 }
530 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
531
532 #ifdef CONFIG_NUMA_BALANCING
533 /*
534 * See the comment in include/linux/pgtable.h
535 */
static inline int pte_protnone(pte_t pte)
537 {
538 /*
539 * pte_present_invalid() tells us that the pte is invalid from HW
540 * perspective but present from SW perspective, so the fields are to be
 * interpreted as per the HW layout. The second and third checks are the unique
542 * encoding that we use for PROT_NONE. It is insufficient to only use
543 * the first check because we share the same encoding scheme with pmds
544 * which support pmd_mkinvalid(), so can be present-invalid without
545 * being PROT_NONE.
546 */
547 return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
548 }
549
static inline int pmd_protnone(pmd_t pmd)
551 {
552 return pte_protnone(pmd_pte(pmd));
553 }
554 #endif
555
556 #define pmd_present(pmd) pte_present(pmd_pte(pmd))
557 #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
558 #define pmd_young(pmd) pte_young(pmd_pte(pmd))
559 #define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
560 #define pmd_user(pmd) pte_user(pmd_pte(pmd))
561 #define pmd_user_exec(pmd) pte_user_exec(pmd_pte(pmd))
562 #define pmd_cont(pmd) pte_cont(pmd_pte(pmd))
563 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
564 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
565 #define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
566 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
567 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
568 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
569 #define pmd_mkinvalid(pmd) pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
570 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
571 #define pmd_uffd_wp(pmd) pte_uffd_wp(pmd_pte(pmd))
572 #define pmd_mkuffd_wp(pmd) pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
573 #define pmd_clear_uffd_wp(pmd) pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
574 #define pmd_swp_uffd_wp(pmd) pte_swp_uffd_wp(pmd_pte(pmd))
575 #define pmd_swp_mkuffd_wp(pmd) pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
576 #define pmd_swp_clear_uffd_wp(pmd) \
577 pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
578 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
579
580 #define pmd_write(pmd) pte_write(pmd_pte(pmd))
581
582 #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
583
584 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
585 #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
586 #endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
588 {
589 return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
590 }
591
592 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
593 #define pmd_special(pte) (!!((pmd_val(pte) & PTE_SPECIAL)))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
595 {
596 return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
597 }
598 #endif
599
600 #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
601 #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
602 #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
603 #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
604 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
605
606 #define pud_young(pud) pte_young(pud_pte(pud))
607 #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
608 #define pud_write(pud) pte_write(pud_pte(pud))
609
610 #define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
611
612 #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
613 #define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
614 #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
615 #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
616
617 #ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
#define pud_special(pud)	pte_special(pud_pte(pud))
#define pud_mkspecial(pud)	pte_pud(pte_mkspecial(pud_pte(pud)))
620 #endif
621
622 #define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
624 {
625 unsigned long pfn = pmd_pfn(pmd);
626
627 return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
628 }
629
630 #define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
632 {
633 unsigned long pfn = pud_pfn(pud);
634
635 return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
636 }
637
static inline void __set_pte_at(struct mm_struct *mm,
639 unsigned long __always_unused addr,
640 pte_t *ptep, pte_t pte, unsigned int nr)
641 {
642 __sync_cache_and_tags(pte, nr);
643 __check_safe_pte_update(mm, ptep, pte);
644 __set_pte(ptep, pte);
645 }
646
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
648 pmd_t *pmdp, pmd_t pmd)
649 {
650 page_table_check_pmd_set(mm, pmdp, pmd);
651 return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
652 PMD_SIZE >> PAGE_SHIFT);
653 }
654
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
656 pud_t *pudp, pud_t pud)
657 {
658 page_table_check_pud_set(mm, pudp, pud);
659 return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
660 PUD_SIZE >> PAGE_SHIFT);
661 }
662
663 #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
664 #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
665
666 #define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
667 #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
668
669 #define __pgprot_modify(prot,mask,bits) \
670 __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
671
672 #define pgprot_nx(prot) \
673 __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
674
675 /*
676 * Mark the prot value as uncacheable and unbufferable.
677 */
678 #define pgprot_noncached(prot) \
679 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
680 #define pgprot_writecombine(prot) \
681 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
682 #define pgprot_device(prot) \
683 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
684 #define pgprot_tagged(prot) \
685 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
686 #define pgprot_mhp pgprot_tagged
687 /*
688 * DMA allocations for non-coherent devices use what the Arm architecture calls
689 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
690 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
691 * is intended for MMIO and thus forbids speculation, preserves access size,
692 * requires strict alignment and can also force write responses to come from the
693 * endpoint.
694 */
695 #define pgprot_dmacoherent(prot) \
696 __pgprot_modify(prot, PTE_ATTRINDX_MASK, \
697 PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
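/*
 * Typical (illustrative) driver-side use of the helpers above when mapping
 * a write-combined BAR or framebuffer into userspace, e.g.:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */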
698
699 #define __HAVE_PHYS_MEM_ACCESS_PROT
700 struct file;
701 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
702 unsigned long size, pgprot_t vma_prot);
703
704 #define pmd_none(pmd) (!pmd_val(pmd))
705
706 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
707 PMD_TYPE_TABLE)
708 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
709 PMD_TYPE_SECT)
710 #define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
711 #define pmd_bad(pmd) (!pmd_table(pmd))
712
713 #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
714 #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
715
716 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
718 {
719 /*
720 * If pmd is present-invalid, pmd_table() won't detect it
721 * as a table, so force the valid bit for the comparison.
722 */
723 return pmd_val(pmd) && pmd_present(pmd) &&
724 !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
725 }
726 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
727
728 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
731 #else
732 #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
733 PUD_TYPE_SECT)
734 #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
735 PUD_TYPE_TABLE)
736 #endif
737
738 extern pgd_t init_pg_dir[];
739 extern pgd_t init_pg_end[];
740 extern pgd_t swapper_pg_dir[];
741 extern pgd_t idmap_pg_dir[];
742 extern pgd_t tramp_pg_dir[];
743 extern pgd_t reserved_pg_dir[];
744
745 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
746
static inline bool in_swapper_pgdir(void *addr)
748 {
749 return ((unsigned long)addr & PAGE_MASK) ==
750 ((unsigned long)swapper_pg_dir & PAGE_MASK);
751 }
752
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
754 {
755 #ifdef __PAGETABLE_PMD_FOLDED
756 if (in_swapper_pgdir(pmdp)) {
757 set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
758 return;
759 }
760 #endif /* __PAGETABLE_PMD_FOLDED */
761
762 WRITE_ONCE(*pmdp, pmd);
763
764 if (pmd_valid(pmd)) {
765 dsb(ishst);
766 isb();
767 }
768 }
769
static inline void pmd_clear(pmd_t *pmdp)
771 {
772 set_pmd(pmdp, __pmd(0));
773 }
774
static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
776 {
777 return __pmd_to_phys(pmd);
778 }
779
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
781 {
782 return (unsigned long)__va(pmd_page_paddr(pmd));
783 }
784
785 /* Find an entry in the third-level page table. */
786 #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
787
788 #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
789 #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
790 #define pte_clear_fixmap() clear_fixmap(FIX_PTE)
791
792 #define pmd_page(pmd) phys_to_page(__pmd_to_phys(pmd))
793
794 /* use ONLY for statically allocated translation tables */
795 #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
796
797 /*
798 * Conversion functions: convert a page and protection to a page entry,
799 * and a page entry and page directory to the page they refer to.
800 */
801 #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
802
803 #if CONFIG_PGTABLE_LEVELS > 2
804
805 #define pmd_ERROR(e) \
806 pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
807
808 #define pud_none(pud) (!pud_val(pud))
809 #define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
810 PUD_TYPE_TABLE)
811 #define pud_present(pud) pte_present(pud_pte(pud))
812 #ifndef __PAGETABLE_PMD_FOLDED
813 #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
814 #else
815 #define pud_leaf(pud) false
816 #endif
817 #define pud_valid(pud) pte_valid(pud_pte(pud))
818 #define pud_user(pud) pte_user(pud_pte(pud))
819 #define pud_user_exec(pud) pte_user_exec(pud_pte(pud))
820
821 static inline bool pgtable_l4_enabled(void);
822
static inline void set_pud(pud_t *pudp, pud_t pud)
824 {
825 if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
826 set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
827 return;
828 }
829
830 WRITE_ONCE(*pudp, pud);
831
832 if (pud_valid(pud)) {
833 dsb(ishst);
834 isb();
835 }
836 }
837
static inline void pud_clear(pud_t *pudp)
839 {
840 set_pud(pudp, __pud(0));
841 }
842
static inline phys_addr_t pud_page_paddr(pud_t pud)
844 {
845 return __pud_to_phys(pud);
846 }
847
static inline pmd_t *pud_pgtable(pud_t pud)
849 {
850 return (pmd_t *)__va(pud_page_paddr(pud));
851 }
852
853 /* Find an entry in the second-level page table. */
854 #define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
855
856 #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
857 #define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
858 #define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
859
860 #define pud_page(pud) phys_to_page(__pud_to_phys(pud))
861
862 /* use ONLY for statically allocated translation tables */
863 #define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
864
865 #else
866
867 #define pud_valid(pud) false
868 #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
869 #define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */
870
871 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
872 #define pmd_set_fixmap(addr) NULL
873 #define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp)
874 #define pmd_clear_fixmap()
875
876 #define pmd_offset_kimg(dir,addr) ((pmd_t *)dir)
877
878 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
879
880 #if CONFIG_PGTABLE_LEVELS > 3
881
static __always_inline bool pgtable_l4_enabled(void)
883 {
884 if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
885 return true;
886 if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
887 return vabits_actual == VA_BITS;
888 return alternative_has_cap_unlikely(ARM64_HAS_VA52);
889 }
890
static inline bool mm_pud_folded(const struct mm_struct *mm)
892 {
893 return !pgtable_l4_enabled();
894 }
895 #define mm_pud_folded mm_pud_folded
896
897 #define pud_ERROR(e) \
898 pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
899
900 #define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d))
901 #define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
902 #define p4d_present(p4d) (!p4d_none(p4d))
903
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
905 {
906 if (in_swapper_pgdir(p4dp)) {
907 set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
908 return;
909 }
910
911 WRITE_ONCE(*p4dp, p4d);
912 dsb(ishst);
913 isb();
914 }
915
static inline void p4d_clear(p4d_t *p4dp)
917 {
918 if (pgtable_l4_enabled())
919 set_p4d(p4dp, __p4d(0));
920 }
921
static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
923 {
924 return __p4d_to_phys(p4d);
925 }
926
927 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
928
static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
930 {
931 return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
932 }
933
static inline pud_t *p4d_pgtable(p4d_t p4d)
935 {
936 return (pud_t *)__va(p4d_page_paddr(p4d));
937 }
938
static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
940 {
941 BUG_ON(!pgtable_l4_enabled());
942
943 return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
944 }
945
946 static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
948 {
949 if (!pgtable_l4_enabled())
950 return p4d_to_folded_pud(p4dp, addr);
951 return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
952 }
953 #define pud_offset_lockless pud_offset_lockless
954
static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
956 {
957 return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
958 }
959 #define pud_offset pud_offset
960
static inline pud_t *pud_set_fixmap(unsigned long addr)
962 {
963 if (!pgtable_l4_enabled())
964 return NULL;
965 return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
966 }
967
static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
969 {
970 if (!pgtable_l4_enabled())
971 return p4d_to_folded_pud(p4dp, addr);
972 return pud_set_fixmap(pud_offset_phys(p4dp, addr));
973 }
974
static inline void pud_clear_fixmap(void)
976 {
977 if (pgtable_l4_enabled())
978 clear_fixmap(FIX_PUD);
979 }
980
981 /* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
983 {
984 if (!pgtable_l4_enabled())
985 return p4d_to_folded_pud(p4dp, addr);
986 return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
987 }
988
989 #define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
990
991 #else
992
static inline bool pgtable_l4_enabled(void) { return false; }
994
995 #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
996
997 /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
998 #define pud_set_fixmap(addr) NULL
999 #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp)
1000 #define pud_clear_fixmap()
1001
1002 #define pud_offset_kimg(dir,addr) ((pud_t *)dir)
1003
1004 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
1005
1006 #if CONFIG_PGTABLE_LEVELS > 4
1007
static __always_inline bool pgtable_l5_enabled(void)
1009 {
1010 if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
1011 return vabits_actual == VA_BITS;
1012 return alternative_has_cap_unlikely(ARM64_HAS_VA52);
1013 }
1014
static inline bool mm_p4d_folded(const struct mm_struct *mm)
1016 {
1017 return !pgtable_l5_enabled();
1018 }
1019 #define mm_p4d_folded mm_p4d_folded
1020
1021 #define p4d_ERROR(e) \
1022 pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))
1023
1024 #define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd))
1025 #define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
1026 #define pgd_present(pgd) (!pgd_none(pgd))
1027
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1029 {
1030 if (in_swapper_pgdir(pgdp)) {
1031 set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
1032 return;
1033 }
1034
1035 WRITE_ONCE(*pgdp, pgd);
1036 dsb(ishst);
1037 isb();
1038 }
1039
static inline void pgd_clear(pgd_t *pgdp)
1041 {
1042 if (pgtable_l5_enabled())
1043 set_pgd(pgdp, __pgd(0));
1044 }
1045
static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
1047 {
1048 return __pgd_to_phys(pgd);
1049 }
1050
1051 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
1052
static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
1054 {
1055 return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
1056 }
1057
static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
1059 {
1060 BUG_ON(!pgtable_l5_enabled());
1061
1062 return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
1063 }
1064
1065 static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
1067 {
1068 if (!pgtable_l5_enabled())
1069 return pgd_to_folded_p4d(pgdp, addr);
1070 return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
1071 }
1072 #define p4d_offset_lockless p4d_offset_lockless
1073
static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
1075 {
1076 return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
1077 }
1078
static inline p4d_t *p4d_set_fixmap(unsigned long addr)
1080 {
1081 if (!pgtable_l5_enabled())
1082 return NULL;
1083 return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
1084 }
1085
static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
1087 {
1088 if (!pgtable_l5_enabled())
1089 return pgd_to_folded_p4d(pgdp, addr);
1090 return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
1091 }
1092
static inline void p4d_clear_fixmap(void)
1094 {
1095 if (pgtable_l5_enabled())
1096 clear_fixmap(FIX_P4D);
1097 }
1098
1099 /* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
1101 {
1102 if (!pgtable_l5_enabled())
1103 return pgd_to_folded_p4d(pgdp, addr);
1104 return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
1105 }
1106
1107 #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
1108
1109 #else
1110
static inline bool pgtable_l5_enabled(void) { return false; }
1112
1113 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
1114
1115 /* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
1116 #define p4d_set_fixmap(addr) NULL
1117 #define p4d_set_fixmap_offset(p4dp, addr) ((p4d_t *)p4dp)
1118 #define p4d_clear_fixmap()
1119
1120 #define p4d_offset_kimg(dir,addr) ((p4d_t *)dir)
1121
1122 static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
1124 {
1125 /*
1126 * With runtime folding of the pud, pud_offset_lockless() passes
1127 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
1128 * will offset the pointer assuming that it points into
1129 * a page-table page. However, the fast GUP path passes us a
1130 * pgd_t allocated on the stack and so we must use the original
1131 * pointer in 'pgdp' to construct the p4d pointer instead of
1132 * using the generic p4d_offset_lockless() implementation.
1133 *
1134 * Note: reusing the original pointer means that we may
1135 * dereference the same (live) page-table entry multiple times.
1136 * This is safe because it is still only loaded once in the
1137 * context of each level and the CPU guarantees same-address
1138 * read-after-read ordering.
1139 */
1140 return p4d_offset(pgdp, addr);
1141 }
1142 #define p4d_offset_lockless p4d_offset_lockless_folded
1143
1144 #endif /* CONFIG_PGTABLE_LEVELS > 4 */
1145
1146 #define pgd_ERROR(e) \
1147 pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
1148
1149 #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
1150 #define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
1151
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1153 {
1154 /*
1155 * Normal and Normal-Tagged are two different memory types and indices
1156 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
1157 */
1158 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
1159 PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
1160 PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;
1161
1162 /* preserve the hardware dirty information */
1163 if (pte_hw_dirty(pte))
1164 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
1165
1166 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
1167 /*
1168 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
1169 * dirtiness again.
1170 */
1171 if (pte_sw_dirty(pte))
1172 pte = pte_mkdirty(pte);
1173 return pte;
1174 }
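/*
 * Example (illustrative): pte_modify() on a hardware-dirty, writable pte
 * with a read-only newprot latches the dirty state into the software
 * PTE_DIRTY bit before applying the new protection, so mprotect() and
 * similar protection changes never lose dirty information.
 */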
1175
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1177 {
1178 return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
1179 }
1180
1181 extern int __ptep_set_access_flags(struct vm_area_struct *vma,
1182 unsigned long address, pte_t *ptep,
1183 pte_t entry, int dirty);
1184
1185 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1186 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1188 unsigned long address, pmd_t *pmdp,
1189 pmd_t entry, int dirty)
1190 {
1191 return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
1192 pmd_pte(entry), dirty);
1193 }
1194
static inline int pud_devmap(pud_t pud)
1196 {
1197 return 0;
1198 }
1199
static inline int pgd_devmap(pgd_t pgd)
1201 {
1202 return 0;
1203 }
1204 #endif
1205
1206 #ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
1208 {
1209 return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
1210 }
1211
static inline bool pmd_user_accessible_page(pmd_t pmd)
1213 {
1214 return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
1215 }
1216
static inline bool pud_user_accessible_page(pud_t pud)
1218 {
1219 return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
1220 }
1221 #endif
1222
1223 /*
1224 * Atomic pte/pmd modifications.
1225 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
1227 unsigned long address,
1228 pte_t *ptep)
1229 {
1230 pte_t old_pte, pte;
1231
1232 pte = __ptep_get(ptep);
1233 do {
1234 old_pte = pte;
1235 pte = pte_mkold(pte);
1236 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
1237 pte_val(old_pte), pte_val(pte));
1238 } while (pte_val(pte) != pte_val(old_pte));
1239
1240 return pte_young(pte);
1241 }
1242
static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
1244 unsigned long address, pte_t *ptep)
1245 {
1246 int young = __ptep_test_and_clear_young(vma, address, ptep);
1247
1248 if (young) {
1249 /*
1250 * We can elide the trailing DSB here since the worst that can
1251 * happen is that a CPU continues to use the young entry in its
1252 * TLB and we mistakenly reclaim the associated page. The
1253 * window for such an event is bounded by the next
1254 * context-switch, which provides a DSB to complete the TLB
1255 * invalidation.
1256 */
1257 flush_tlb_page_nosync(vma, address);
1258 }
1259
1260 return young;
1261 }
1262
1263 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1264 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1266 unsigned long address,
1267 pmd_t *pmdp)
1268 {
1269 return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
1270 }
1271 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1272
static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
1274 unsigned long address, pte_t *ptep)
1275 {
1276 pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
1277
1278 page_table_check_pte_clear(mm, pte);
1279
1280 return pte;
1281 }
1282
static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
1284 pte_t *ptep, unsigned int nr, int full)
1285 {
1286 for (;;) {
1287 __ptep_get_and_clear(mm, addr, ptep);
1288 if (--nr == 0)
1289 break;
1290 ptep++;
1291 addr += PAGE_SIZE;
1292 }
1293 }
1294
static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
1296 unsigned long addr, pte_t *ptep,
1297 unsigned int nr, int full)
1298 {
1299 pte_t pte, tmp_pte;
1300
1301 pte = __ptep_get_and_clear(mm, addr, ptep);
1302 while (--nr) {
1303 ptep++;
1304 addr += PAGE_SIZE;
1305 tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
1306 if (pte_dirty(tmp_pte))
1307 pte = pte_mkdirty(pte);
1308 if (pte_young(tmp_pte))
1309 pte = pte_mkyoung(pte);
1310 }
1311 return pte;
1312 }
1313
1314 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1315 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1317 unsigned long address, pmd_t *pmdp)
1318 {
1319 pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
1320
1321 page_table_check_pmd_clear(mm, pmd);
1322
1323 return pmd;
1324 }
1325 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1326
static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
1328 unsigned long address, pte_t *ptep,
1329 pte_t pte)
1330 {
1331 pte_t old_pte;
1332
1333 do {
1334 old_pte = pte;
1335 pte = pte_wrprotect(pte);
1336 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
1337 pte_val(old_pte), pte_val(pte));
1338 } while (pte_val(pte) != pte_val(old_pte));
1339 }
1340
1341 /*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
1343 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
1344 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
1346 unsigned long address, pte_t *ptep)
1347 {
1348 ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
1349 }
1350
static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
1352 pte_t *ptep, unsigned int nr)
1353 {
1354 unsigned int i;
1355
1356 for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
1357 __ptep_set_wrprotect(mm, address, ptep);
1358 }
1359
static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
1361 unsigned long addr, pte_t *ptep,
1362 pte_t pte, cydp_t flags)
1363 {
1364 pte_t old_pte;
1365
1366 do {
1367 old_pte = pte;
1368
1369 if (flags & CYDP_CLEAR_YOUNG)
1370 pte = pte_mkold(pte);
1371 if (flags & CYDP_CLEAR_DIRTY)
1372 pte = pte_mkclean(pte);
1373
1374 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
1375 pte_val(old_pte), pte_val(pte));
1376 } while (pte_val(pte) != pte_val(old_pte));
1377 }
1378
static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
1380 unsigned long addr, pte_t *ptep,
1381 unsigned int nr, cydp_t flags)
1382 {
1383 pte_t pte;
1384
1385 for (;;) {
1386 pte = __ptep_get(ptep);
1387
1388 if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
1389 __set_pte(ptep, pte_mkclean(pte_mkold(pte)));
1390 else
1391 __clear_young_dirty_pte(vma, addr, ptep, pte, flags);
1392
1393 if (--nr == 0)
1394 break;
1395 ptep++;
1396 addr += PAGE_SIZE;
1397 }
1398 }
1399
1400 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1401 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1403 unsigned long address, pmd_t *pmdp)
1404 {
1405 __ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
1406 }
1407
1408 #define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1410 unsigned long address, pmd_t *pmdp, pmd_t pmd)
1411 {
1412 page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
1413 return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
1414 }
1415 #endif
1416
1417 /*
1418 * Encode and decode a swap entry:
1419 * bits 0-1: present (must be zero)
1420 * bits 2: remember PG_anon_exclusive
1421 * bit 3: remember uffd-wp state
1422 * bits 6-10: swap type
1423 * bit 11: PTE_PRESENT_INVALID (must be zero)
1424 * bits 12-61: swap offset
1425 */
1426 #define __SWP_TYPE_SHIFT 6
1427 #define __SWP_TYPE_BITS 5
1428 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
1429 #define __SWP_OFFSET_SHIFT 12
1430 #define __SWP_OFFSET_BITS 50
1431 #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
1432
1433 #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
1434 #define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
1435 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
1436
1437 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1438 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
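/*
 * Worked example (arbitrary values): __swp_entry(3, 0x1234) places the type
 * in bits 6-10 and the offset in bits 12-61, i.e.
 * (3 << 6) | (0x1234 << 12) == 0x12340c0, leaving the present bits (0-1)
 * and PTE_PRESENT_INVALID (bit 11) clear as required.
 */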
1439
1440 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1441 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
1442 #define __swp_entry_to_pmd(swp) __pmd((swp).val)
1443 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
1444
1445 /*
1446 * Ensure that there are not more swap files than can be encoded in the kernel
1447 * PTEs.
1448 */
1449 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
1450
1451 #ifdef CONFIG_ARM64_MTE
1452
1453 #define __HAVE_ARCH_PREPARE_TO_SWAP
1454 extern int arch_prepare_to_swap(struct folio *folio);
1455
1456 #define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1458 {
1459 if (system_supports_mte())
1460 mte_invalidate_tags(type, offset);
1461 }
1462
static inline void arch_swap_invalidate_area(int type)
1464 {
1465 if (system_supports_mte())
1466 mte_invalidate_tags_area(type);
1467 }
1468
1469 #define __HAVE_ARCH_SWAP_RESTORE
1470 extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);
1471
1472 #endif /* CONFIG_ARM64_MTE */
1473
1474 /*
1475 * On AArch64, the cache coherency is handled via the __set_ptes() function.
1476 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
1478 struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
1479 unsigned int nr)
1480 {
1481 /*
1482 * We don't do anything here, so there's a very small chance of
1483 * us retaking a user fault which we just fixed up. The alternative
1484 * is doing a dsb(ishst), but that penalises the fastpath.
1485 */
1486 }
1487
1488 #define update_mmu_cache(vma, addr, ptep) \
1489 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
1490 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1491
1492 #ifdef CONFIG_ARM64_PA_BITS_52
1493 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1494 #else
1495 #define phys_to_ttbr(addr) (addr)
1496 #endif
1497
1498 /*
1499 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a zeroed
1501 * page after fork() + CoW for pfn mappings. We don't always have a
1502 * hardware-managed access flag on arm64.
1503 */
1504 #define arch_has_hw_pte_young cpu_has_hw_af
1505
1506 /*
1507 * Experimentally, it's cheap to set the access flag in hardware and we
1508 * benefit from prefaulting mappings as 'old' to start with.
1509 */
1510 #define arch_wants_old_prefaulted_pte cpu_has_hw_af
1511
static inline bool pud_sect_supported(void)
1513 {
1514 return PAGE_SIZE == SZ_4K;
1515 }
1516
1517
1518 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1519 #define ptep_modify_prot_start ptep_modify_prot_start
1520 extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1521 unsigned long addr, pte_t *ptep);
1522
1523 #define ptep_modify_prot_commit ptep_modify_prot_commit
1524 extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
1525 unsigned long addr, pte_t *ptep,
1526 pte_t old_pte, pte_t new_pte);
1527
1528 #ifdef CONFIG_ARM64_CONTPTE
1529
1530 /*
1531 * The contpte APIs are used to transparently manage the contiguous bit in ptes
1532 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
1533 * a private implementation detail of the public ptep API (see below).
1534 */
1535 extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
1536 pte_t *ptep, pte_t pte);
1537 extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
1538 pte_t *ptep, pte_t pte);
1539 extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
1540 extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
1541 extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
1542 pte_t *ptep, pte_t pte, unsigned int nr);
1543 extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
1544 pte_t *ptep, unsigned int nr, int full);
1545 extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
1546 unsigned long addr, pte_t *ptep,
1547 unsigned int nr, int full);
1548 extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
1549 unsigned long addr, pte_t *ptep);
1550 extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
1551 unsigned long addr, pte_t *ptep);
1552 extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
1553 pte_t *ptep, unsigned int nr);
1554 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
1555 unsigned long addr, pte_t *ptep,
1556 pte_t entry, int dirty);
1557 extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
1558 unsigned long addr, pte_t *ptep,
1559 unsigned int nr, cydp_t flags);
1560
static __always_inline void contpte_try_fold(struct mm_struct *mm,
1562 unsigned long addr, pte_t *ptep, pte_t pte)
1563 {
1564 /*
1565 * Only bother trying if both the virtual and physical addresses are
1566 * aligned and correspond to the last entry in a contig range. The core
 * code mostly modifies ranges from low to high, so this is likely the
1568 * the last modification in the contig range, so a good time to fold.
1569 * We can't fold special mappings, because there is no associated folio.
1570 */
1571
1572 const unsigned long contmask = CONT_PTES - 1;
1573 bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;
1574
1575 if (unlikely(valign)) {
1576 bool palign = (pte_pfn(pte) & contmask) == contmask;
1577
1578 if (unlikely(palign &&
1579 pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
1580 __contpte_try_fold(mm, addr, ptep, pte);
1581 }
1582 }
1583
static __always_inline void contpte_try_unfold(struct mm_struct *mm,
1585 unsigned long addr, pte_t *ptep, pte_t pte)
1586 {
1587 if (unlikely(pte_valid_cont(pte)))
1588 __contpte_try_unfold(mm, addr, ptep, pte);
1589 }
1590
1591 #define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
1593 {
1594 if (!pte_valid_cont(pte))
1595 return 1;
1596
1597 return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
1598 }
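/*
 * Example (assuming 4K pages, where CONT_PTES is 16): a ptep pointing at
 * the fifth entry of a contiguous block (index 4) yields a batch hint of
 * 12, i.e. the number of entries remaining in that block including this
 * one.
 */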

/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */
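
/*
 * Illustrative sketch (not from the original source) of the leak/clear
 * behaviour described above, as seen from a hypothetical contig-unaware
 * caller holding the PTL:
 *
 *	pte_t pte = ptep_get(ptep);        // may still have PTE_CONT set
 *	pte = pte_mkold(pte);              // core code modifies the pte
 *	set_ptes(mm, addr, ptep, pte, 1);  // wrapper strips PTE_CONT via
 *					   // pte_mknoncont() before writing
 *
 * The wrappers below follow this pattern: sanitise the incoming pte, then
 * fold or unfold the surrounding contig block as needed.
 */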

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless, do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}
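
/*
 * Illustrative note (not from the original source): when core mm maps a
 * large folio it passes the whole batch in one call, e.g. for a 64K folio
 * built from 4K pages:
 *
 *	set_ptes(mm, addr, ptep, pte, 16);
 *
 * which lets contpte_set_ptes() write all 16 entries and set PTE_CONT in a
 * single pass, instead of folding entry by entry via the nr == 1 path.
 */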

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes so we only need to check the contig bit as the condition
		 * for unfold, and we can remove the contig bit from the pte we
		 * read to avoid re-reading. This speeds up fork(), which is
		 * sensitive to this overhead for order-0 folios. Equivalent to
		 * contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get			__ptep_get
#define set_pte				__set_pte
#define set_ptes			__set_ptes
#define pte_clear			__pte_clear
#define clear_full_ptes			__clear_full_ptes
#define get_and_clear_full_ptes		__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear		__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young	__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young		__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect		__ptep_set_wrprotect
#define wrprotect_ptes			__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags		__ptep_set_access_flags
#define clear_young_dirty_ptes		__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */