/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
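
/*
 * Worked example of the 52-bit conversion (illustrative, assuming the
 * 64K-page LPA layout where PTE_ADDR_LOW covers descriptor bits [47:16]
 * and PTE_ADDR_HIGH is bits [15:12], holding PA bits [51:48]):
 *
 *   phys                = 0x0009000000010000  (PA bits [51:48] = 0x9)
 *   phys >> 36          = 0x0000000000009000  (PA[51:48] now at [15:12])
 *   __phys_to_pte_val() = 0x0000000000019000  after masking
 *   __pte_to_phys() recombines: 0x10000 | (0x9000 << 36)
 *                       = 0x0009000000010000, the original address.
 */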

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

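/*
 * The "- 1" on both sides of the comparison above keeps the clamping
 * correct when (end) sits at the very top of the address space and has
 * wrapped to 0: with unsigned arithmetic, (end) - 1 becomes ULONG_MAX,
 * so any non-zero __boundary still compares as being below it. This
 * mirrors the p?d_addr_end() helpers in include/linux/pgtable.h.
 */
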
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
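
/*
 * For example, wrprotecting a hardware-dirty pte (the "Dirty=1,
 * Writable=1" row: PTE_RDONLY=0, PTE_WRITE=1) via pte_wrprotect()
 * above first latches the dirty state into the software bit
 * (PTE_DIRTY=1), then clears PTE_WRITE and sets PTE_RDONLY, landing
 * on the "Dirty=1, Writable=0" row, so pte_dirty() still reports the
 * page as dirty.
 */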

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
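
/*
 * Round-trip example of the encoding above, for swap type 3 and offset
 * 0x1234 (values chosen arbitrarily for illustration):
 *
 *   __swp_entry(3, 0x1234).val = (3 << 2) | (0x1234 << 8) = 0x12340c
 *   __swp_type()   = (0x12340c >> 2) & 0x3f               = 3
 *   __swp_offset() = (0x12340c >> 8) & __SWP_OFFSET_MASK  = 0x1234
 *
 * Bits [1:0] and bit 58 stay zero, so the resulting pte is neither
 * valid nor PROT_NONE and pte_present() correctly returns false.
 */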

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
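
/*
 * With 52-bit PAs the TTBR register format keeps base address bits
 * [51:48] in register bits [5:2], which is what the fold above
 * implements: (addr >> 46) moves PA[51:48] down to bits [5:2]. A worked
 * example (illustrative values):
 *
 *   addr           = 0x000a0000f0000000
 *   addr >> 46     = 0x0000000000000028   (0xa now at bits [5:2])
 *   phys_to_ttbr() = 0x00000000f0000028   after TTBR_BADDR_MASK_52
 */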

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */