/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)
#endif

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
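
/*
 * Illustrative example (not part of the original header): the mask
 * arithmetic above rounds the start of vmalloc space up to the next
 * 8MB boundary past high_memory.  Assuming high_memory ended up at
 * 0xc8123456, we would get:
 *
 *	(0xc8123456 + 0x00800000) & ~0x007fffff == 0xc8800000
 *
 * leaving at least an 8MB guard hole after the direct-mapped RAM.
 */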

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
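
/*
 * Illustrative expansion (not part of the original header): each of
 * the pgprot_*() helpers here rewrites only the memory-type field of
 * a pgprot value.  For instance, pgprot_noncached(prot) expands to
 *
 *	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
 *
 * i.e. the old L_PTE_MT_* bits are cleared and the uncached memory
 * type is ORed in; all other protection bits are left untouched.
 */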

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

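/*
 * Summary (not part of the original header, reading straight off the
 * two definitions below): with CONFIG_ARM_DMA_MEM_BUFFERABLE,
 * pgprot_dmacoherent() maps DMA-coherent memory as normal,
 * non-cacheable (bufferable) memory; otherwise it falls back to
 * uncached/strongly-ordered.  Either way the mapping is
 * non-executable (L_PTE_XN).
 */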
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
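
/*
 * Worked example (not part of the original header): the three digits
 * index the exec/write/read permission bits taken from a vma, with
 * __Pxwr used for private (copy-on-write) mappings and __Sxwr for
 * shared ones.  A PROT_READ|PROT_WRITE, MAP_PRIVATE mapping therefore
 * selects __P011 == __PAGE_COPY (the write permission is withheld so
 * the first store faults and triggers copy-on-write), while the same
 * protection with MAP_SHARED selects __S011 == __PAGE_SHARED.
 */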

#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
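
/*
 * Note (not part of the original header): the (s32)PAGE_MASK cast above
 * matters on LPAE, where pmd_val() is 64-bit.  PAGE_MASK is a 32-bit
 * quantity (e.g. 0xfffff000); casting it to s32 and letting it promote
 * to the 64-bit pte type sign-extends it to 0xfffffffffffff000, so the
 * high physical-address bits are preserved rather than masked off.
 */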

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

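/*
 * Note (not part of the original header): pte_isset() below guards
 * against 64-bit pte values on LPAE.  When the tested bits fit in a
 * u32, the plain bitwise AND is returned as-is; when they sit above
 * bit 31, the !! forces the 64-bit result down to 0/1 so that it
 * survives truncation to int at the call sites.
 */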
#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

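/*
 * Worked example (not part of the original header): the mask/needed
 * pair below checks "present user pte, and writable if a write is
 * requested" in a single compare.  For a write to a read-only user
 * pte, L_PTE_RDONLY is added to the mask but not to needed, so
 * (pte_val & mask) picks up the RDONLY bit, no longer equals needed,
 * and the access is refused.
 */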
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval);

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
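
/*
 * Note (not part of the original header): only the bits in `mask` are
 * taken from the new protection; everything outside it, notably the
 * L_PTE_MT_* memory-type field and the dirty/young bits, is carried
 * over unchanged from the old pte.  So e.g. an mprotect() from RW to
 * read-only updates L_PTE_RDONLY while leaving a dirty bit intact.
 */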

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	__pte((swp).val | PTE_TYPE_FAULT)
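
/*
 * Worked example (not part of the original header): with type 3 and
 * offset 0x1000,
 *
 *	__swp_entry(3, 0x1000).val == (3 << 2) | (0x1000 << 7) == 0x8000c
 *
 * __swp_type() recovers 3 and __swp_offset() recovers 0x1000.  The two
 * low bits stay zero, so the resulting pte is neither present to Linux
 * nor valid to the MMU, and any access faults as intended.
 */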

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */