/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include "pgtable-nommu.h"

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value simply means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts, so that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a 4kB hole between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff000000UL
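
/*
 * Worked example (illustrative values, not from this header): with
 * high_memory ending at 0xc8000000, VMALLOC_START rounds up to the
 * next 8MB boundary:
 *
 *	VMALLOC_START = (0xc8000000 + 0x00800000) & ~0x007fffff
 *	              = 0xc8800000 & 0xff800000
 *	              = 0xc8800000
 *
 * leaving the 8MB guard hole between lowmem and the vmalloc area.
 */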

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to exist.  This is particularly important for CPUs that do
 * not use high vectors.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
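
/*
 * Example usage (a sketch, not part of this header): a driver mapping
 * device memory into user space would typically adjust the vma's
 * protections with one of the helpers above before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * Only the memory-type field (L_PTE_MT_MASK) is replaced; the
 * permission bits are left untouched.
 */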

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) we cannot do execute protection;
 *  2) if we could do execute protection, then read would be implied;
 *  3) write implies read permission.
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
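
/*
 * Worked example: the three digits in __Pxwr/__Sxwr are the exec, write
 * and read permission bits of the requested mapping.  A private
 * PROT_READ|PROT_WRITE mapping selects __P011 == __PAGE_COPY, which is
 * installed with L_PTE_RDONLY set so the first write faults and the
 * page can be duplicated (copy-on-write); the shared equivalent
 * __S011 == __PAGE_SHARED is writable outright.
 */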

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
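
/*
 * Example walk (a sketch; locking and error handling omitted): these
 * helpers compose to translate a virtual address down to its pte.  On
 * classic 2-level ARM the pud and pmd levels are folded, so the walk
 * effectively goes pgd -> pte:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd)) {
 *		pte_t *pte = pte_offset_kernel(pmd, addr);
 *		...
 *	}
 */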

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

/*
 * Install a pte: for present user mappings, synchronise the I/D caches
 * as needed and set PTE_EXT_NG so the TLB entry is non-global, i.e.
 * tagged with the current ASID rather than shared across address spaces.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_present_user(pteval)) {
		__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
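
/*
 * For instance, PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY) above expands
 * to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) |= L_PTE_RDONLY;
 *		return pte;
 *	}
 *
 * Each helper returns a modified copy of the pte; the caller must still
 * write it back (e.g. with set_pte_at()) for the change to take effect.
 */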

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
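
/*
 * Example (a sketch): protection changes such as mprotect() go through
 * pte_modify(), which replaces only the XN/RDONLY/USER bits and
 * preserves the pfn, memory type and young/dirty state:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */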

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------------> < type -> 0 0 0
 *
 * This gives us up to 31 swap files and 64GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
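
/*
 * Worked example: __swp_entry(3, 0x1234) encodes to
 *
 *	(3 << 3) | (0x1234 << 8) = 0x123418
 *
 * with the type in bits 3..7, the offset in bits 8..31, and bits 0..2
 * left clear so a swap entry can never be mistaken for a present pte
 * (L_PTE_PRESENT stays zero).
 */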

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
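
/*
 * Worked example: pgoff_to_pte(0x1000) yields (0x1000 << 3) | L_PTE_FILE
 * and pte_to_pgoff() recovers 0x1000 by shifting back.  With the low 3
 * bits reserved for flag bits, 32 - 3 = 29 bits remain for the file
 * offset, hence PTE_FILE_MAX_BITS.
 */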

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * remap `size' bytes of physical memory, starting at page frame `pfn',
 * with page protection `prot' at virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */