/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
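
/*
 * _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-specific software bits: they
 * mark ptes that the host address space has not caught up with yet.
 * NEWPAGE means the page must be mapped or unmapped on the host;
 * NEWPROT means only the protection changed, so an mprotect() call is
 * enough.  A minimal sketch of the consumer side, with hypothetical
 * host_map()/host_protect() helpers standing in for the real TLB
 * flushing code (see the set_pte() comment below):
 *
 *	if (pte_newpage(pte))
 *		host_map(addr, pte);		update the host mapping
 *	else if (pte_newprot(pte))
 *		host_protect(addr, pte);	update only the protection
 *	pte = pte_mkuptodate(pte);		host and pte now agree
 */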

#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
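
/*
 * The resulting kernel virtual layout, low to high (a sketch; the exact
 * rounding comes from the macros above):
 *
 *	physical memory .. end_iomem
 *	VMALLOC_OFFSET hole
 *	VMALLOC_START .. VMALLOC_END		vmalloc/ioremap area
 *	two guard pages
 *	PKMAP_BASE ..				only with CONFIG_HIGHMEM
 *	FIXADDR_START ..			fixmap entries
 */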

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
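
/*
 * The generic mm consumes these through protection_map[], indexed by
 * the low vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED).  A
 * sketch of the lookup that vm_get_page_prot() performs:
 *
 *	unsigned long bits = vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED);
 *	pgprot_t prot = protection_map[bits];
 *
 * e.g. a private VM_READ|VM_WRITE mapping resolves to __P011, i.e.
 * PAGE_COPY: it starts out write-protected so that the first store
 * faults and triggers copy-on-write.
 */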

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
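
/*
 * A sketch of typical use from fault handling code that has built a new
 * pte (names here are illustrative, not an API defined by this header):
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(mm, address, ptep, entry);
 *
 * set_pte() re-marks the pte _PAGE_NEWPAGE (and _PAGE_NEWPROT when
 * present), so the next flush pushes the change out to the host.
 */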

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));	\
	pte;})
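
/*
 * mk_pte() is a statement expression: it builds a pte from a struct
 * page and a protection, pre-marking present ptes as new so that the
 * host mapping is (re)established on the next flush.  A sketch of its
 * combination with pte_modify(), which keeps the pfn but swaps the
 * protection bits:
 *
 *	pte_t pte = mk_pte(page, PAGE_READONLY);
 *	pte = pte_modify(pte, PAGE_SHARED);
 */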

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
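
/*
 * Putting the offset macros together - a minimal sketch of a software
 * page table walk for a user address (assuming the caller holds the
 * needed locks; pud_offset()/pmd_offset() come from the generic
 * folding headers on two-level configurations):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte;
 *
 *	if (!pmd_present(*pmd))
 *		return NULL;
 *	pte = pte_offset_map(pmd, address);
 *	...
 *	pte_unmap(pte);
 *
 * virt_to_pte() below performs essentially this walk.
 */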

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,pte) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 4) & 0x3f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
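
/*
 * The resulting swap pte layout, derived from the shifts above:
 *
 *	bit  0		_PAGE_PRESENT (must be clear for a swap entry)
 *	bits 1-3	kept free for _PAGE_NEWPAGE/_PAGE_NEWPROT/_PAGE_FILE
 *	bits 4-9	swap type (6 bits, up to 64 swap areas)
 *	bit  10		unused
 *	bits 11-	swap offset
 *
 * e.g. __swp_entry(2, 100) yields (2 << 4) | (100 << 11) = 0x32020.
 */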

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
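
/*
 * Illustrative use (a sketch, not an API defined here): tearing down a
 * temporary kernel mapping once its user is done with it, where pmd is
 * assumed to point at the entry covering vaddr:
 *
 *	pte_t *ptep = pte_offset_kernel(pmd, vaddr);
 *	kpte_clear_flush(ptep, vaddr);
 */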

#endif /* __UM_PGTABLE_H */