/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, the PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)

#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - \
				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
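
/*
 * Illustrative sketch: because vmemmap is pre-biased by the
 * section-aligned first pfn, the struct page for any pfn covered by
 * the linear map is simply:
 *
 *	struct page *page = vmemmap + pfn;
 *
 * which is what pfn_to_page() expands to under SPARSEMEM_VMEMMAP.
 */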

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
#else
#define PROT_DEFAULT		_PROT_DEFAULT
#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
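
/*
 * Illustrative example: a private PROT_READ|PROT_WRITE mapping indexes
 * __P011 and therefore starts out as PAGE_COPY, which lacks PTE_WRITE;
 * the first store faults and is resolved by copy-on-write. The same
 * protection bits on a MAP_SHARED mapping index __S011 and get
 * PAGE_SHARED, which has PTE_WRITE set and can be written directly.
 */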

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte)) {
		if (pte_sw_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
			__sync_icache_dcache(pte, addr);
	}

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}
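
/*
 * Worked example of the table above (illustrative): with hardware DBM,
 * a store to a clean, writable page clears PTE_RDONLY instead of
 * faulting, so the dirty test performed by pte_dirty() reduces to:
 *
 *	dirty = pte_sw_dirty(pte) ||
 *		(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY));
 */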

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
struct vm_area_struct;
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp);
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
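
/*
 * Illustrative sketch: the accessors above compose into a full
 * software walk for a kernel virtual address (4-level case):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With fewer translation levels, the pud/pmd steps fold away via the
 * asm-generic nopud/nopmd headers.
 */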

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#ifdef CONFIG_ARM64_HW_AFDBM
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pteval_t pteval;
	unsigned int tmp, res;

	asm volatile("//	ptep_test_and_clear_young\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
	"	and	%0, %0, %4		// clear PTE_AF\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));

	return res;
}
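
/*
 * The exclusive load/store loop above amounts to the following
 * non-atomic C sketch (illustrative only; the asm is required to stay
 * atomic against concurrent hardware access-flag updates):
 *
 *	pteval_t old = pte_val(*ptep);
 *	int young = !!(old & PTE_AF);
 *	pte_val(*ptep) = old & ~PTE_AF;
 *	return young;
 */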

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pteval_t old_pteval;
	unsigned int tmp;

	asm volatile("//	ptep_get_and_clear\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	stxr	%w1, xzr, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));

	return __pte(old_pteval);
}
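
/*
 * Non-atomic C sketch of the loop above (illustrative): read the old
 * pte while replacing it with an invalid (zero) entry in one atomic
 * step:
 *
 *	pte_t old = *ptep;
 *	*ptep = __pte(0);
 *	return old;
 */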

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	ptep_set_wrprotect\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
	: "cc");
}
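
/*
 * Non-atomic C sketch of the loop above (illustrative): the hardware
 * dirty state is folded into the software PTE_DIRTY bit before write
 * permission is removed:
 *
 *	pteval_t v = pte_val(*ptep);
 *	if (!(v & PTE_RDONLY))		// hw-dirty
 *		v |= PTE_DIRTY | PTE_RDONLY;
 *	v &= ~PTE_WRITE;		// also clears the DBM bit
 *	pte_val(*ptep) = v;
 */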

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
#endif	/* CONFIG_ARM64_HW_AFDBM */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
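
/*
 * Worked example of the encoding above (illustrative): swap type 3 at
 * offset 0x1234 becomes
 *
 *	(3 << __SWP_TYPE_SHIFT) | (0x1234 << __SWP_OFFSET_SHIFT)
 *		== 0xc | 0x123400 == 0x12340c
 *
 * and __swp_type()/__swp_offset() recover 3 and 0x1234. Bits 0-1 and
 * bit 58 remain zero, so such an entry is never pte_present().
 */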

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */