/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
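
/*
 * _PAGE_NEWPAGE and _PAGE_NEWPROT are software bits specific to UML:
 * roughly, _PAGE_NEWPAGE marks a pte whose page must still be
 * (un)mapped on the host, while _PAGE_NEWPROT marks an already mapped
 * page that only needs an mprotect to match the new flags; the tlb
 * flush code (fix_range) acts on them and clears them.  See set_pte()
 * below.
 */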

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be
 * caught.  The vmalloc() routines leave a hole of 4kB between each
 * vmalloced area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START - 2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)
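
/*
 * Worked example, assuming __va_space is the 8MB mentioned above:
 * with end_iomem = 0x20400000, VMALLOC_START becomes
 * (0x20400000 + 0x800000) & ~0x7fffff = 0x20800000, i.e. end_iomem
 * rounded up to the next 8MB boundary (a 4MB hole in this case).
 */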

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
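
/*
 * These feed the generic protection_map[]; the digits in each name are
 * the <exec><write><read> bits of the mapping.  __P* entries are for
 * private mappings, so "writable" maps to the copy-on-write PAGE_COPY,
 * while the __S* (MAP_SHARED) entries give a really writable
 * PAGE_SHARED.
 */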

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
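
/*
 * Note that pte_present() also accepts _PAGE_PROTNONE: a PROT_NONE
 * page is still resident, it merely has _PAGE_PRESENT clear so that
 * every access faults (see the flag comment at the top of this file).
 */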

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	if (likely(pte_get_bits(pte, _PAGE_RW)))
		pte_clear_bits(pte, _PAGE_RW);
	else
		return pte;
	return pte_mknewprot(pte);
}
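
/*
 * Note that pte_wrprotect() sets _PAGE_NEWPROT only when it actually
 * drops _PAGE_RW: a pte that was not writable to begin with needs no
 * host-side mprotect.
 */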

static inline pte_t pte_mkread(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
		return pte;
	pte_set_bits(pte, _PAGE_USER);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
		return pte;
	pte_set_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */
	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *pteptr, pte_t pteval)
{
	set_pte(pteptr, pteval);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
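
/*
 * _PAGE_NEWPAGE is masked out here because it only records a pending
 * host-side update: two ptes differing in nothing else still map the
 * same page with the same protection.
 */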

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));\
	pte;})
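
/*
 * Typical use, as a sketch only (the vma/ptep plumbing is illustrative;
 * just mk_pte() and set_pte_at() come from this file):
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */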

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
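
/*
 * A lookup such as virt_to_pte() composes the offset helpers above,
 * roughly as follows (a sketch only: pgd/pmd validity checks are
 * omitted, and depending on kernel version the folded p4d level adds
 * one more step):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */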

#define update_mmu_cache(vma, address, ptep) do { } while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
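
/*
 * The shifts above keep the low five bits of a swap pte clear, so
 * _PAGE_PRESENT (0x001), _PAGE_NEWPAGE (0x002) and _PAGE_PROTNONE
 * (0x010) can still be set and tested on it: the type sits in bits
 * 5-9 and the offset from bit 11 upwards.
 */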

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif /* __UM_PGTABLE_H */