/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */
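/*
 * Example (sketch, assuming the usual two-level helpers; pmd_offset()
 * is expected to come from <asm/pgtable-2level.h>): with the mid level
 * folded, a software walk to a kernel PTE is effectively two lookups:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);	    (top level)
 *	pmd_t *pmd = pmd_offset(pgd, addr);	    (folded: same entry)
 *	pte_t *pte = pte_offset_kernel(pmd, addr);  (leaf level)
 */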

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
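/*
 * Example (general MM behaviour, sketch): a read fault on a
 * never-written anonymous page can be serviced by mapping
 * ZERO_PAGE(addr) read-only, so no page frame is allocated until
 * the first write.
 */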

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init()	do { } while (0)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START		KSEG2
#define VMALLOC_END		KSEG3

/*
 *     M32R TLB format
 *
 *     [0]    [1:19]           [20:23]       [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0         PPN          |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 *                                     RWX
 */

#define _PAGE_BIT_DIRTY		0	/* software: page changed */
#define _PAGE_BIT_PRESENT	1	/* Valid: page is valid */
#define _PAGE_BIT_GLOBAL	2	/* Global */
#define _PAGE_BIT_LARGE		3	/* Large */
#define _PAGE_BIT_EXEC		4	/* Execute */
#define _PAGE_BIT_WRITE		5	/* Write */
#define _PAGE_BIT_READ		6	/* Read */
#define _PAGE_BIT_NONCACHABLE	7	/* Non cachable */
#define _PAGE_BIT_ACCESSED	8	/* software: page referenced */
#define _PAGE_BIT_PROTNONE	9	/* software: if not present */

#define _PAGE_DIRTY		(1UL << _PAGE_BIT_DIRTY)
#define _PAGE_PRESENT		(1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL		(1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE		(1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC		(1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE		(1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ		(1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE	(1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED		(1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE		(1UL << _PAGE_BIT_PROTNONE)
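/*
 * Worked example (illustrative): a present, readable, writable and
 * referenced PTE carries
 *
 *	_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED
 *	  = 0x002 | 0x040 | 0x020 | 0x100 = 0x162
 *
 * in its low bits, while the page frame number occupies the bits
 * selected by PTE_MASK.
 */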

#define _PAGE_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _KERNPG_TABLE	\
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _PAGE_CHG_MASK	\
	( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )

#ifdef CONFIG_MMU
#define PAGE_NONE	\
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	\
	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
		| _PAGE_ACCESSED)
#define PAGE_COPY	\
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_COPY_EXEC	\
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY	\
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)

#define __PAGE_KERNEL	\
	( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
	| _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO	( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE	( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x)	__pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#else
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_SHARED_EXEC	__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_COPY_EXEC		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_READONLY_EXEC	__pgprot(0)

#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#endif /* CONFIG_MMU */

	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/* page table for 0-4MB for everybody */

#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define	pmd_bad(x)	((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_WRITE, ptep);
}
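/*
 * Note (sketch): both helpers above use the kernel's atomic bitops
 * directly on the PTE word, so clearing the accessed or write bit
 * does not lose a concurrent update to the other bits.
 */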

/*
 * Macro and implementation to mark a page protection as uncachable.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot |= _PAGE_NONCACHABLE;
	return __pgprot(prot);
}

#define pgprot_writecombine(prot) pgprot_noncached(prot)
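/*
 * Usage sketch (illustrative): callers that map device memory can
 * derive an uncached protection from an existing one, e.g.
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * This header provides no separate write-combining encoding, so
 * pgprot_writecombine() above simply aliases pgprot_noncached().
 */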

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) \
		| pgprot_val(newprot)));

	return pte;
}
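/*
 * Sketch: pte_modify() keeps only the bits in _PAGE_CHG_MASK (the
 * page frame number plus the accessed and dirty software bits) and
 * takes everything else from the new protection, e.g.
 *
 *	entry = pte_modify(entry, PAGE_READONLY);   (drops write/exec)
 */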

static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	\
	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address)	\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)	\
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte)		do { } while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	\
	((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
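/*
 * Worked example (illustrative): __swp_entry(3, 0x42) packs the swap
 * type into bits 2..6 and the offset into bits 10 and up:
 *
 *	(3 << 2) | (0x42 << 10) = 0x0000c | 0x10800 = 0x1080c
 *
 * and decoding recovers both fields:
 *
 *	__swp_type():   (0x1080c >> 2) & 0x1f = 3
 *	__swp_offset():  0x1080c >> 10        = 0x42
 *
 * Bit 1 (_PAGE_PRESENT) stays clear, so a swap entry is never mistaken
 * for a present PTE.
 */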

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_PGTABLE_H */