/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
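
/*
 * Worked example for pte_cont_addr_end() above (illustrative; assumes a 4K
 * page size where the contiguous hint spans 16 entries, i.e. CONT_PTE_SIZE
 * is 64K): pte_cont_addr_end(0x13000, 0x50000) rounds 0x13000 up to the next
 * 64K boundary and returns 0x20000, while pte_cont_addr_end(0x13000, 0x18000)
 * is clamped to the range end and returns 0x18000. The "- 1" in the
 * comparison keeps the clamping correct even if (end) wraps around to 0.
 */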

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
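/*
 * Illustrative walk-through of the table above (a sketch, not additional
 * state): a clean, writable page starts out with PTE_WRITE=1, PTE_RDONLY=1
 * and PTE_DIRTY=0. With hardware DBM, the first write clears PTE_RDONLY in
 * place, so pte_hw_dirty() and therefore pte_dirty() become true without
 * taking a fault. Without DBM, the write faults instead and the fault
 * handler sets the software PTE_DIRTY bit, which pte_sw_dirty() reports.
 */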
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte, addr);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	(__pud(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
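
/*
 * Typical use of the helpers above (illustrative example, not part of this
 * file's API): a driver mmap() handler for a framebuffer or other
 * write-combinable region might do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * which swaps the memory attribute index to MT_NORMAL_NC and marks the
 * mapping non-executable (PTE_PXN | PTE_UXN) while leaving the remaining
 * protection bits of the original pgprot untouched.
 */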
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
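
/*
 * Putting the lookup helpers together: a software walk of the kernel page
 * tables for a virtual address looks roughly like the sketch below (assuming
 * CONFIG_PGTABLE_LEVELS > 3 so no level is folded; error handling and
 * huge/section mappings are ignored for brevity):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step masks the table entry down to a physical table address
 * (p?d_page_paddr()), adds the index derived from the relevant VA bits and
 * converts back to a virtual pointer with __va().
 */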

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
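
/*
 * In other words, only the bits listed in "mask" above (permission and
 * validity bits) are taken from newprot; everything else, notably the output
 * address and the memory attribute index, is kept from the original pte.
 * For example (illustrative), mprotect(PROT_READ) on a writable,
 * hardware-dirty page first latches the dirty state into the software
 * PTE_DIRTY bit, then clears PTE_WRITE and sets PTE_RDONLY from the new
 * protection value.
 */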

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
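/*
 * Clear the access flag without losing concurrent hardware updates: the pte
 * is re-read and the cmpxchg_relaxed() retried until the old value we based
 * the update on matches what is actually in memory. The return value reports
 * whether the entry was young before the flag was cleared.
 */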
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
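
/*
 * Worked example of the encoding above (values are illustrative):
 * __swp_entry(3, 0x10) yields (3 << 2) | (0x10 << 8) = 0x100c, from which
 * __swp_type() recovers 3 and __swp_offset() recovers 0x10. Bits 0-1 and
 * bit 58 stay zero, so the resulting pte is neither pte_valid() nor
 * PTE_PROT_NONE and is never mistaken for a present mapping.
 */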

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */