#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
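
/*
 * Editorial note (not part of the original header): _PAGE_BIT_NX is bit
 * 63, which only exists when PTEs are 64 bits wide, i.e. on 64-bit
 * kernels and on 32-bit kernels with PAE.  On non-PAE 32-bit builds the
 * #else branch above defines _PAGE_NX as 0, so expressions that OR it in
 * still compile and simply have no effect:
 *
 *	pteval_t v = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;
 *	(non-PAE 32-bit: v == 0x3; PAE/64-bit: v == 0x3 | 1ULL << 63)
 */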

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
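
/*
 * Editorial sketch (added commentary, based on how this kernel era
 * programs the PAT MSR in arch/x86/mm/pat.c): the PCD/PWT pair above
 * indexes a PAT entry, giving these effective memory types:
 *
 *	PCD PWT		type
 *	 0   0		write-back (WB)
 *	 0   1		write-combining (WC, once the PAT is reprogrammed)
 *	 1   0		uncached minus (UC-)
 *	 1   1		strong uncacheable (UC)
 */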

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
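
/*
 * Illustrative note (added): the __Pxxx/__Sxxx tables above are indexed
 * by the mmap() protection bits in xwr order, for MAP_PRIVATE and
 * MAP_SHARED mappings respectively.  For example, PROT_READ | PROT_WRITE
 * with MAP_PRIVATE selects __P011 == PAGE_COPY, a read-only pgprot that
 * makes the first write fault trigger copy-on-write, while the same
 * protections with MAP_SHARED select __S011 == PAGE_SHARED, which keeps
 * _PAGE_RW set.
 */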

/*
 * Early identity-mapping PTE attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high
 * address mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
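
/*
 * Usage sketch (illustrative, not from the original source): a driver
 * mapping device memory to userspace would typically apply this before
 * installing the mapping, e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	... then io_remap_pfn_range(vma, ...);
 *
 * The boot_cpu_data.x86 > 3 test skips the bits on a 386, which predates
 * the PCD/PWT page-table bits.
 */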

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
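
/*
 * Illustrative example (added): the pte_mk*()/pte_clr*() helpers above
 * are pure functions over the pte value and compose freely, e.g.:
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));
 *
 * Nothing touches the page tables here; the caller still installs the
 * result with set_pte_at() (or similar) and handles any TLB flush.
 */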

extern pteval_t __supported_pte_mask;

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
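
/*
 * Worked example (editorial): starting from a pte with _PAGE_PRESENT |
 * _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY set, pte_modify(pte,
 * PAGE_READONLY) keeps the pfn plus _PAGE_ACCESSED and _PAGE_DIRTY
 * (they are in _PAGE_CHG_MASK), drops _PAGE_RW, and picks up
 * _PAGE_PRESENT, _PAGE_USER and _PAGE_NX from the new protection.  This
 * is how mprotect() can change protections without losing the
 * hardware-maintained accessed/dirty state.
 */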

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(unsigned long flags,
						unsigned long new_flags)
{
	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
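
/*
 * Illustrative walk (added): looking up the pgd entry covering a kernel
 * virtual address.  pgd_offset_k() is just pgd_offset() on init_mm:
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	(equivalent to: init_mm.pgd + pgd_index(address))
 *
 * KERNEL_PGD_BOUNDARY below is the index of the first pgd slot covering
 * the kernel mapping (PAGE_OFFSET and up), and KERNEL_PGD_PTRS counts
 * the slots from there to the end of the pgd page.
 */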

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
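
/*
 * Usage sketch (illustrative): the level out-parameter lets callers tell
 * a real 4K pte from a large-page pmd/pud entry returned as a pte:
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(addr, &level);
 *
 *	if (kpte && level == PG_LEVEL_4K)
 *		... treat *kpte as an ordinary 4K mapping ...
 */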

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
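
/*
 * Usage sketch (illustrative, mirroring what pgd allocation does): copy
 * the kernel's pgd slots into a freshly allocated pgd so every process
 * shares the kernel part of the address space:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */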


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */