1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  */
5 #ifndef __ASM_PGTABLE_H
6 #define __ASM_PGTABLE_H
7 
8 #include <asm/bug.h>
9 #include <asm/proc-fns.h>
10 
11 #include <asm/memory.h>
12 #include <asm/mte.h>
13 #include <asm/pgtable-hwdef.h>
14 #include <asm/pgtable-prot.h>
15 #include <asm/tlbflush.h>
16 
17 /*
18  * VMALLOC range.
19  *
20  * VMALLOC_START: beginning of the kernel vmalloc space
21  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
22  *	and fixed mappings
23  */
24 #define VMALLOC_START		(MODULES_END)
25 #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
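/*
 * Note: VMALLOC_END is expressed as a negative offset, i.e. it sits
 * PUD_SIZE + VMEMMAP_SIZE + 64K below the top of the kernel address space,
 * which is the room reserved for the vmemmap, PCI I/O space and fixed
 * mappings mentioned above.
 */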
26 
27 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
28 
29 #define FIRST_USER_ADDRESS	0UL
30 
31 #ifndef __ASSEMBLY__
32 
33 #include <asm/cmpxchg.h>
34 #include <asm/fixmap.h>
35 #include <linux/mmdebug.h>
36 #include <linux/mm_types.h>
37 #include <linux/sched.h>
38 
39 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
41 
42 /* Set stride and tlb_level in flush_*_tlb_range */
43 #define flush_pmd_tlb_range(vma, addr, end)	\
44 	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
45 #define flush_pud_tlb_range(vma, addr, end)	\
46 	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
47 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
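/*
 * The trailing arguments above are the stride and the tlb_level hint for the
 * range invalidation: PUD entries sit at level 1 and PMD entries at level 2,
 * which lets CPUs that support a translation-level (TTL) hint optimise the
 * invalidation.
 */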
48 
49 /*
50  * Outside of a few very special situations (e.g. hibernation), we always
51  * use broadcast TLB invalidation instructions, therefore a spurious page
52  * fault on one CPU which has been handled concurrently by another CPU
53  * does not need to perform additional invalidation.
54  */
55 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
56 
57 /*
58  * ZERO_PAGE is a global shared page that is always zero: used
59  * for zero-mapped memory areas, etc.
60  */
61 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
62 #define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))
63 
64 #define pte_ERROR(e)	\
65 	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
66 
67 /*
68  * Macros to convert between a physical address and its placement in a
69  * page table entry, taking care of 52-bit addresses.
70  */
71 #ifdef CONFIG_ARM64_PA_BITS_52
72 static inline phys_addr_t __pte_to_phys(pte_t pte)
73 {
74 	return (pte_val(pte) & PTE_ADDR_LOW) |
75 		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
76 }
77 static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
78 {
79 	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
80 }
81 #else
82 #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
83 #define __phys_to_pte_val(phys)	(phys)
84 #endif
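/*
 * Worked example for the 52-bit case: physical address bits [47:12] occupy
 * the usual PTE position, while bits [51:48] are folded into PTE bits
 * [15:12], hence the shift by 36 (48 - 12) in both directions above.
 */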
85 
86 #define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
87 #define pfn_pte(pfn,prot)	\
88 	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
89 
90 #define pte_none(pte)		(!pte_val(pte))
91 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
92 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
93 
94 /*
95  * The following only work if pte_present(). Undefined behaviour otherwise.
96  */
97 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
98 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
99 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
100 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
101 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
102 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
103 #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
104 #define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
105 				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
106 
107 #define pte_cont_addr_end(addr, end)						\
108 ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
109 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
110 })
111 
112 #define pmd_cont_addr_end(addr, end)						\
113 ({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
114 	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
115 })
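/*
 * These return the next CONT_PTE/CONT_PMD aligned boundary above addr,
 * clamped to end, so callers can walk a range in contiguous-hint sized
 * chunks (e.g. 64K, i.e. 16 contiguous ptes, with a 4K granule).
 */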
116 
117 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
118 #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
119 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
120 
121 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
122 #define pte_valid_not_user(pte) \
123 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
124 #define pte_valid_user(pte) \
125 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
126 
127 /*
128  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
129  * so that we don't erroneously return false for pages that have been
130  * remapped as PROT_NONE but are yet to be flushed from the TLB.
131  * Note that we can't make any assumptions based on the state of the access
132  * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
133  * TLB.
134  */
135 #define pte_accessible(mm, pte)	\
136 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
137 
138 /*
139  * p??_access_permitted() is true for valid user mappings (subject to the
140  * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
141  * set.
142  */
143 #define pte_access_permitted(pte, write) \
144 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
145 #define pmd_access_permitted(pmd, write) \
146 	(pte_access_permitted(pmd_pte(pmd), (write)))
147 #define pud_access_permitted(pud, write) \
148 	(pte_access_permitted(pud_pte(pud), (write)))
149 
150 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
151 {
152 	pte_val(pte) &= ~pgprot_val(prot);
153 	return pte;
154 }
155 
156 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
157 {
158 	pte_val(pte) |= pgprot_val(prot);
159 	return pte;
160 }
161 
162 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
163 {
164 	pmd_val(pmd) &= ~pgprot_val(prot);
165 	return pmd;
166 }
167 
168 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
169 {
170 	pmd_val(pmd) |= pgprot_val(prot);
171 	return pmd;
172 }
173 
174 static inline pte_t pte_mkwrite(pte_t pte)
175 {
176 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
177 	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
178 	return pte;
179 }
180 
181 static inline pte_t pte_mkclean(pte_t pte)
182 {
183 	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
184 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
185 
186 	return pte;
187 }
188 
189 static inline pte_t pte_mkdirty(pte_t pte)
190 {
191 	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
192 
193 	if (pte_write(pte))
194 		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
195 
196 	return pte;
197 }
198 
199 static inline pte_t pte_wrprotect(pte_t pte)
200 {
201 	/*
202 	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
203 	 * clear), set the PTE_DIRTY bit.
204 	 */
205 	if (pte_hw_dirty(pte))
206 		pte = pte_mkdirty(pte);
207 
208 	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
209 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
210 	return pte;
211 }
212 
213 static inline pte_t pte_mkold(pte_t pte)
214 {
215 	return clear_pte_bit(pte, __pgprot(PTE_AF));
216 }
217 
218 static inline pte_t pte_mkyoung(pte_t pte)
219 {
220 	return set_pte_bit(pte, __pgprot(PTE_AF));
221 }
222 
223 static inline pte_t pte_mkspecial(pte_t pte)
224 {
225 	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
226 }
227 
228 static inline pte_t pte_mkcont(pte_t pte)
229 {
230 	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
231 	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
232 }
233 
234 static inline pte_t pte_mknoncont(pte_t pte)
235 {
236 	return clear_pte_bit(pte, __pgprot(PTE_CONT));
237 }
238 
239 static inline pte_t pte_mkpresent(pte_t pte)
240 {
241 	return set_pte_bit(pte, __pgprot(PTE_VALID));
242 }
243 
244 static inline pmd_t pmd_mkcont(pmd_t pmd)
245 {
246 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
247 }
248 
249 static inline pte_t pte_mkdevmap(pte_t pte)
250 {
251 	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
252 }
253 
254 static inline void set_pte(pte_t *ptep, pte_t pte)
255 {
256 	WRITE_ONCE(*ptep, pte);
257 
258 	/*
259 	 * Only if the new pte is valid and a kernel mapping; otherwise TLB
260 	 * maintenance or update_mmu_cache() provide the necessary barriers.
261 	 */
262 	if (pte_valid_not_user(pte)) {
263 		dsb(ishst);
264 		isb();
265 	}
266 }
267 
268 extern void __sync_icache_dcache(pte_t pteval);
269 
270 /*
271  * PTE bits configuration in the presence of hardware Dirty Bit Management
272  * (PTE_WRITE == PTE_DBM):
273  *
274  * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
275  *   0      0      |   1           0          0
276  *   0      1      |   1           1          0
277  *   1      0      |   1           0          1
278  *   1      1      |   0           1          x
279  *
280  * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
281  * the page fault mechanism. Checking the dirty status of a pte becomes:
282  *
283  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
284  */
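/*
 * Worked example: a pte with PTE_WRITE set and PTE_RDONLY clear has been
 * written through hardware DBM, so pte_hw_dirty() and therefore pte_dirty()
 * are true even though the software PTE_DIRTY bit is still clear.
 * pte_wrprotect() and pte_modify() fold this state into PTE_DIRTY before
 * making the entry read-only, so the dirty information is not lost.
 */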
285 
286 static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
287 					   pte_t pte)
288 {
289 	pte_t old_pte;
290 
291 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
292 		return;
293 
294 	old_pte = READ_ONCE(*ptep);
295 
296 	if (!pte_valid(old_pte) || !pte_valid(pte))
297 		return;
298 	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
299 		return;
300 
301 	/*
302 	 * Check for potential race with hardware updates of the pte
303 	 * (ptep_set_access_flags safely changes valid ptes without going
304 	 * through an invalid entry).
305 	 */
306 	VM_WARN_ONCE(!pte_young(pte),
307 		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
308 		     __func__, pte_val(old_pte), pte_val(pte));
309 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
310 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
311 		     __func__, pte_val(old_pte), pte_val(pte));
312 }
313 
314 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
315 			      pte_t *ptep, pte_t pte)
316 {
317 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
318 		__sync_icache_dcache(pte);
319 
320 	/*
321 	 * If the PTE would provide user space access to the tags associated
322 	 * with it then ensure that the MTE tags are synchronised.  Although
323 	 * pte_access_permitted() returns false for exec only mappings, they
324 	 * don't expose tags (instruction fetches don't check tags).
325 	 */
326 	if (system_supports_mte() && pte_access_permitted(pte, false) &&
327 	    !pte_special(pte)) {
328 		pte_t old_pte = READ_ONCE(*ptep);
329 		/*
330 		 * We only need to synchronise if the new PTE has tags enabled
331 		 * or if swapping in (in which case another mapping may have
332 		 * set tags in the past even if this PTE isn't tagged).
333 		 * (!pte_none() && !pte_present()) is an open coded version of
334 		 * is_swap_pte()
335 		 */
336 		if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
337 			mte_sync_tags(old_pte, pte);
338 	}
339 
340 	__check_racy_pte_update(mm, ptep, pte);
341 
342 	set_pte(ptep, pte);
343 }
344 
345 /*
346  * Huge pte definitions.
347  */
348 #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
349 
350 /*
351  * Hugetlb definitions.
352  */
353 #define HUGE_MAX_HSTATE		4
354 #define HPAGE_SHIFT		PMD_SHIFT
355 #define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
356 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
357 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
358 
359 static inline pte_t pgd_pte(pgd_t pgd)
360 {
361 	return __pte(pgd_val(pgd));
362 }
363 
364 static inline pte_t p4d_pte(p4d_t p4d)
365 {
366 	return __pte(p4d_val(p4d));
367 }
368 
369 static inline pte_t pud_pte(pud_t pud)
370 {
371 	return __pte(pud_val(pud));
372 }
373 
374 static inline pud_t pte_pud(pte_t pte)
375 {
376 	return __pud(pte_val(pte));
377 }
378 
379 static inline pmd_t pud_pmd(pud_t pud)
380 {
381 	return __pmd(pud_val(pud));
382 }
383 
384 static inline pte_t pmd_pte(pmd_t pmd)
385 {
386 	return __pte(pmd_val(pmd));
387 }
388 
389 static inline pmd_t pte_pmd(pte_t pte)
390 {
391 	return __pmd(pte_val(pte));
392 }
393 
394 static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
395 {
396 	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
397 }
398 
399 static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
400 {
401 	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
402 }
403 
404 #ifdef CONFIG_NUMA_BALANCING
405 /*
406  * See the comment in include/linux/pgtable.h
407  */
408 static inline int pte_protnone(pte_t pte)
409 {
410 	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
411 }
412 
413 static inline int pmd_protnone(pmd_t pmd)
414 {
415 	return pte_protnone(pmd_pte(pmd));
416 }
417 #endif
418 
419 #define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))
420 
421 static inline int pmd_present(pmd_t pmd)
422 {
423 	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
424 }
425 
426 /*
427  * THP definitions.
428  */
429 
430 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
431 static inline int pmd_trans_huge(pmd_t pmd)
432 {
433 	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
434 }
435 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
436 
437 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
438 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
439 #define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
440 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
441 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
442 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
443 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
444 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
445 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
446 
447 static inline pmd_t pmd_mkinvalid(pmd_t pmd)
448 {
449 	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
450 	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));
451 
452 	return pmd;
453 }
454 
455 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
456 
457 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
458 
459 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
460 
461 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
462 #define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
463 #endif
464 static inline pmd_t pmd_mkdevmap(pmd_t pmd)
465 {
466 	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
467 }
468 
469 #define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
470 #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
471 #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
472 #define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
473 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
474 
475 #define pud_young(pud)		pte_young(pud_pte(pud))
476 #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
477 #define pud_write(pud)		pte_write(pud_pte(pud))
478 
479 #define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))
480 
481 #define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
482 #define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
483 #define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
484 #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
485 
486 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
487 #define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))
488 
489 #define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
490 #define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)
491 
492 #define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
493 #define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)
494 
495 #define __pgprot_modify(prot,mask,bits) \
496 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
497 
498 #define pgprot_nx(prot) \
499 	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
500 
501 /*
502  * Mark the prot value as uncacheable and unbufferable.
503  */
504 #define pgprot_noncached(prot) \
505 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
506 #define pgprot_writecombine(prot) \
507 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
508 #define pgprot_device(prot) \
509 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
510 #define pgprot_tagged(prot) \
511 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
512 #define pgprot_mhp	pgprot_tagged
513 /*
514  * DMA allocations for non-coherent devices use what the Arm architecture calls
515  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
516  * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
517  * is intended for MMIO and thus forbids speculation, preserves access size,
518  * requires strict alignment and can also force write responses to come from the
519  * endpoint.
520  */
521 #define pgprot_dmacoherent(prot) \
522 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
523 			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
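/*
 * pgprot_dmacoherent() is what the DMA mapping code typically applies when
 * remapping a buffer for a non-coherent device: Normal-NC rather than Device
 * memory, for the reasons described above.
 */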
524 
525 /*
526  * Mark the prot value as outer cacheable and inner non-cacheable. Non-coherent
527  * devices on a system with support for a system or last level cache use these
528  * attributes to cache allocations in the system cache.
529  */
530 #define pgprot_syscached(prot) \
531 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
532 			PTE_ATTRINDX(MT_NORMAL_iNC_oWB) | PTE_PXN | PTE_UXN)
533 
534 #define __HAVE_PHYS_MEM_ACCESS_PROT
535 struct file;
536 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
537 				     unsigned long size, pgprot_t vma_prot);
538 
539 #define pmd_none(pmd)		(!pmd_val(pmd))
540 
541 #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
542 				 PMD_TYPE_TABLE)
543 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
544 				 PMD_TYPE_SECT)
545 #define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
546 #define pmd_bad(pmd)		(!pmd_table(pmd))
547 
548 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
549 static inline bool pud_sect(pud_t pud) { return false; }
550 static inline bool pud_table(pud_t pud) { return true; }
551 #else
552 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
553 				 PUD_TYPE_SECT)
554 #define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
555 				 PUD_TYPE_TABLE)
556 #endif
557 
558 extern pgd_t init_pg_dir[PTRS_PER_PGD];
559 extern pgd_t init_pg_end[];
560 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
561 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
562 extern pgd_t idmap_pg_end[];
563 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
564 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
565 
566 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
567 
568 #ifdef CONFIG_MEMORY_HOTPLUG
569 extern int populate_range_driver_managed(u64 start, u64 size,
570 		const char *resource_name);
571 extern int depopulate_range_driver_managed(u64 start, u64 size,
572 		const char *resource_name);
573 #endif
574 
575 static inline bool in_swapper_pgdir(void *addr)
576 {
577 	return ((unsigned long)addr & PAGE_MASK) ==
578 	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
579 }
580 
581 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
582 {
583 #ifdef __PAGETABLE_PMD_FOLDED
584 	if (in_swapper_pgdir(pmdp)) {
585 		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
586 		return;
587 	}
588 #endif /* __PAGETABLE_PMD_FOLDED */
589 
590 	WRITE_ONCE(*pmdp, pmd);
591 
592 	if (pmd_valid(pmd)) {
593 		dsb(ishst);
594 		isb();
595 	}
596 }
597 
598 static inline void pmd_clear(pmd_t *pmdp)
599 {
600 	set_pmd(pmdp, __pmd(0));
601 }
602 
603 static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
604 {
605 	return __pmd_to_phys(pmd);
606 }
607 
608 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
609 {
610 	return (unsigned long)__va(pmd_page_paddr(pmd));
611 }
612 
613 /* Find an entry in the third-level page table. */
614 #define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
615 
616 #define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
617 #define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
618 #define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
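/*
 * The fixmap accessors above let early page-table code edit a pte table
 * through a temporary FIX_PTE mapping, since the table may not yet be
 * reachable via the linear map.
 */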
619 
620 #define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))
621 
622 /* use ONLY for statically allocated translation tables */
623 #define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
624 
625 /*
626  * Conversion functions: convert a page and protection to a page entry,
627  * and a page entry and page directory to the page they refer to.
628  */
629 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
630 
631 #if CONFIG_PGTABLE_LEVELS > 2
632 
633 #define pmd_ERROR(e)	\
634 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
635 
636 #define pud_none(pud)		(!pud_val(pud))
637 #define pud_bad(pud)		(!pud_table(pud))
638 #define pud_present(pud)	pte_present(pud_pte(pud))
639 #define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
640 #define pud_valid(pud)		pte_valid(pud_pte(pud))
641 
642 static inline void set_pud(pud_t *pudp, pud_t pud)
643 {
644 #ifdef __PAGETABLE_PUD_FOLDED
645 	if (in_swapper_pgdir(pudp)) {
646 		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
647 		return;
648 	}
649 #endif /* __PAGETABLE_PUD_FOLDED */
650 
651 	WRITE_ONCE(*pudp, pud);
652 
653 	if (pud_valid(pud)) {
654 		dsb(ishst);
655 		isb();
656 	}
657 }
658 
659 static inline void pud_clear(pud_t *pudp)
660 {
661 	set_pud(pudp, __pud(0));
662 }
663 
664 static inline phys_addr_t pud_page_paddr(pud_t pud)
665 {
666 	return __pud_to_phys(pud);
667 }
668 
669 static inline pmd_t *pud_pgtable(pud_t pud)
670 {
671 	return (pmd_t *)__va(pud_page_paddr(pud));
672 }
673 
674 /* Find an entry in the second-level page table. */
675 #define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
676 
677 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
678 #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
679 #define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
680 
681 #define pud_page(pud)			phys_to_page(__pud_to_phys(pud))
682 
683 /* use ONLY for statically allocated translation tables */
684 #define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
685 
686 #else
687 
688 #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
689 
690 /* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
691 #define pmd_set_fixmap(addr)		NULL
692 #define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
693 #define pmd_clear_fixmap()
694 
695 #define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)
696 
697 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
698 
699 #if CONFIG_PGTABLE_LEVELS > 3
700 
701 #define pud_ERROR(e)	\
702 	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
703 
704 #define p4d_none(p4d)		(!p4d_val(p4d))
705 #define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
706 #define p4d_present(p4d)	(p4d_val(p4d))
707 
708 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
709 {
710 	if (in_swapper_pgdir(p4dp)) {
711 		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
712 		return;
713 	}
714 
715 	WRITE_ONCE(*p4dp, p4d);
716 	dsb(ishst);
717 	isb();
718 }
719 
720 static inline void p4d_clear(p4d_t *p4dp)
721 {
722 	set_p4d(p4dp, __p4d(0));
723 }
724 
725 static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
726 {
727 	return __p4d_to_phys(p4d);
728 }
729 
730 static inline pud_t *p4d_pgtable(p4d_t p4d)
731 {
732 	return (pud_t *)__va(p4d_page_paddr(p4d));
733 }
734 
735 /* Find an entry in the first-level page table. */
736 #define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
737 
738 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
739 #define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
740 #define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
741 
742 #define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
743 
744 /* use ONLY for statically allocated translation tables */
745 #define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
746 
747 #else
748 
749 #define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
750 #define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
751 
752 /* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
753 #define pud_set_fixmap(addr)		NULL
754 #define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
755 #define pud_clear_fixmap()
756 
757 #define pud_offset_kimg(dir,addr)	((pud_t *)dir)
758 
759 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
760 
761 #define pgd_ERROR(e)	\
762 	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
763 
764 #define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
765 #define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
766 
767 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
768 {
769 	/*
770 	 * Normal and Normal-Tagged are two different memory types and indices
771 	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
772 	 */
773 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
774 			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
775 			      PTE_ATTRINDX_MASK;
776 	/* preserve the hardware dirty information */
777 	if (pte_hw_dirty(pte))
778 		pte = pte_mkdirty(pte);
779 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
780 	/*
781 	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
782 	 * dirtiness again.
783 	 */
784 	if (pte_sw_dirty(pte))
785 		pte = pte_mkdirty(pte);
786 	return pte;
787 }
788 
789 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
790 {
791 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
792 }
793 
794 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
795 extern int ptep_set_access_flags(struct vm_area_struct *vma,
796 				 unsigned long address, pte_t *ptep,
797 				 pte_t entry, int dirty);
798 
799 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
800 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
801 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
802 					unsigned long address, pmd_t *pmdp,
803 					pmd_t entry, int dirty)
804 {
805 	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
806 }
807 
808 static inline int pud_devmap(pud_t pud)
809 {
810 	return 0;
811 }
812 
813 static inline int pgd_devmap(pgd_t pgd)
814 {
815 	return 0;
816 }
817 #endif
818 
819 /*
820  * Atomic pte/pmd modifications.
821  */
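/*
 * The cmpxchg loops below retry until the update applies to the value that
 * was actually read, so access/dirty bit updates made concurrently by
 * hardware DBM are not lost while PTE_AF is cleared or the pte is
 * write-protected.
 */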
822 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
823 static inline int __ptep_test_and_clear_young(pte_t *ptep)
824 {
825 	pte_t old_pte, pte;
826 
827 	pte = READ_ONCE(*ptep);
828 	do {
829 		old_pte = pte;
830 		pte = pte_mkold(pte);
831 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
832 					       pte_val(old_pte), pte_val(pte));
833 	} while (pte_val(pte) != pte_val(old_pte));
834 
835 	return pte_young(pte);
836 }
837 
838 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
839 					    unsigned long address,
840 					    pte_t *ptep)
841 {
842 	return __ptep_test_and_clear_young(ptep);
843 }
844 
845 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
846 extern bool should_flush_tlb_when_young(void);
847 
848 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
849 					 unsigned long address, pte_t *ptep)
850 {
851 	int young = ptep_test_and_clear_young(vma, address, ptep);
852 
853 	if (young && should_flush_tlb_when_young()) {
854 		/*
855 		 * We can elide the trailing DSB here since the worst that can
856 		 * happen is that a CPU continues to use the young entry in its
857 		 * TLB and we mistakenly reclaim the associated page. The
858 		 * window for such an event is bounded by the next
859 		 * context-switch, which provides a DSB to complete the TLB
860 		 * invalidation.
861 		 */
862 		flush_tlb_page_nosync(vma, address);
863 	}
864 
865 	return young;
866 }
867 
868 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
869 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
870 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
871 					    unsigned long address,
872 					    pmd_t *pmdp)
873 {
874 	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
875 }
876 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
877 
878 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
879 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
880 				       unsigned long address, pte_t *ptep)
881 {
882 	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
883 }
884 
885 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
886 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
887 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
888 					    unsigned long address, pmd_t *pmdp)
889 {
890 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
891 }
892 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
893 
894 /*
895  * ptep_set_wrprotect - mark read-only while transferring potential hardware
896  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
897  */
898 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
899 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
900 {
901 	pte_t old_pte, pte;
902 
903 	pte = READ_ONCE(*ptep);
904 	do {
905 		old_pte = pte;
906 		pte = pte_wrprotect(pte);
907 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
908 					       pte_val(old_pte), pte_val(pte));
909 	} while (pte_val(pte) != pte_val(old_pte));
910 }
911 
912 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
913 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
914 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
915 				      unsigned long address, pmd_t *pmdp)
916 {
917 	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
918 }
919 
920 #define pmdp_establish pmdp_establish
921 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
922 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
923 {
924 	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
925 }
926 #endif
927 
928 /*
929  * Encode and decode a swap entry:
930  *	bits 0-1:	present (must be zero)
931  *	bits 2-7:	swap type
932  *	bits 8-57:	swap offset
933  *	bit  58:	PTE_PROT_NONE (must be zero)
934  */
935 #define __SWP_TYPE_SHIFT	2
936 #define __SWP_TYPE_BITS		6
937 #define __SWP_OFFSET_BITS	50
938 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
939 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
940 #define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)
941 
942 #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
943 #define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
944 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
945 
946 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
947 #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
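/*
 * Worked example of the encoding above: __swp_entry(3, 0x1234) gives
 * (3 << 2) | (0x1234 << 8) = 0x12340c; bits 0-1 remain zero, so a swap
 * entry is never pte_valid().
 */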
948 
949 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
950 #define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
951 #define __swp_entry_to_pmd(swp)		__pmd((swp).val)
952 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
953 
954 /*
955  * Ensure that there are not more swap files than can be encoded in the kernel
956  * PTEs.
957  */
958 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
959 
960 extern int kern_addr_valid(unsigned long addr);
961 
962 #ifdef CONFIG_ARM64_MTE
963 
964 #define __HAVE_ARCH_PREPARE_TO_SWAP
965 static inline int arch_prepare_to_swap(struct page *page)
966 {
967 	if (system_supports_mte())
968 		return mte_save_tags(page);
969 	return 0;
970 }
971 
972 #define __HAVE_ARCH_SWAP_INVALIDATE
973 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
974 {
975 	if (system_supports_mte())
976 		mte_invalidate_tags(type, offset);
977 }
978 
979 static inline void arch_swap_invalidate_area(int type)
980 {
981 	if (system_supports_mte())
982 		mte_invalidate_tags_area(type);
983 }
984 
985 #define __HAVE_ARCH_SWAP_RESTORE
986 static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
987 {
988 	if (system_supports_mte() && mte_restore_tags(entry, page))
989 		set_bit(PG_mte_tagged, &page->flags);
990 }
991 
992 #endif /* CONFIG_ARM64_MTE */
993 
994 /*
995  * On AArch64, the cache coherency is handled via the set_pte_at() function.
996  */
997 static inline void update_mmu_cache(struct vm_area_struct *vma,
998 				    unsigned long addr, pte_t *ptep)
999 {
1000 	/*
1001 	 * We don't do anything here, so there's a very small chance of
1002 	 * us retaking a user fault which we just fixed up. The alternative
1003 	 * is doing a dsb(ishst), but that penalises the fastpath.
1004 	 */
1005 }
1006 
1007 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1008 
1009 #ifdef CONFIG_ARM64_PA_BITS_52
1010 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1011 #else
1012 #define phys_to_ttbr(addr)	(addr)
1013 #endif
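/*
 * With 52-bit PAs the TTBR base address carries bits [51:48] of the table
 * address in BADDR[5:2], hence the extra "| (addr >> 46)" above.
 */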
1014 
1015 /*
1016  * On arm64 without a hardware Access Flag, copying from user will fail because
1017  * the pte is old and cannot be marked young. So we always end up with a zeroed
1018  * page after fork() + CoW for pfn mappings. We don't always have a
1019  * hardware-managed access flag on arm64.
1020  */
1021 #define arch_has_hw_pte_young		cpu_has_hw_af
1022 
1023 /*
1024  * Experimentally, it's cheap to set the access flag in hardware and we
1025  * benefit from prefaulting mappings as 'old' to start with.
1026  */
1027 #define arch_wants_old_prefaulted_pte	cpu_has_hw_af
1028 
1029 #endif /* !__ASSEMBLY__ */
1030 
1031 #endif /* __ASM_PGTABLE_H */
1032