#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
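
/*
 * Illustrative sketch (not from this header): a driver's mmap()
 * method would typically mark its mapping uncached before remapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */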

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern int kaiser_enabled;
#else
#define kaiser_enabled 0
#endif

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
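
/*
 * Note that the vaddr argument is ignored here: x86 has a single
 * shared zero page and no cache colouring.  Minimal usage sketch:
 *
 *	struct page *zpage = ZERO_PAGE(addr);
 */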

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
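
/*
 * e.g. (sketch) a caller tests pte_present() before querying state:
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		set_page_dirty(pte_page(pte));
 */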
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
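
/*
 * Sketch: on a CPU without NX support, __supported_pte_mask has
 * _PAGE_NX clear, so
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_NX))
 *
 * comes back as (_PAGE_PRESENT | _PAGE_RW), while a non-present
 * (e.g. swap) value keeps all of its bits.
 */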

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}
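
/*
 * Sketch of the intended mprotect-side use (assumed caller, roughly
 * what core mm does when refreshing vma->vm_page_prot):
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(vma->vm_flags));
 */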

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
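
/*
 * For instance, downgrading an uncached request to write-back is
 * rejected, while the reverse direction is fine:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_UC_MINUS,
 *			       _PAGE_CACHE_MODE_WB)		== 0
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WB,
 *			       _PAGE_CACHE_MODE_UC_MINUS)	== 1
 */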

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
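
/*
 * Worked example (x86-64, where PMD_SHIFT == 21 and PTRS_PER_PMD == 512):
 *
 *	pmd_index(0x00400000) == (0x400000 >> 21) & 511 == 2
 */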

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
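
/*
 * Typical use in a fault path (sketch):
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 */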

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	pgdval_t ignore_flags = _PAGE_USER;
	/*
	 * We set NX on KAISER pgds that map userspace memory so
	 * that userspace can not meaningfully use the kernel
	 * page table by accident; it will fault on the first
	 * instruction it tries to run.  See native_set_pgd().
	 */
	if (kaiser_enabled)
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
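
/*
 * Putting the levels together, a simplified software walk of the
 * kernel page tables looks like this (pgd_none()/pgd_bad() checks and
 * friends omitted for brevity):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */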


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
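
/*
 * i.e. a helper that flips PTE bits by hand must notify afterwards,
 * exactly as ptep_set_wrprotect() below does:
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 */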

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (kaiser_enabled) {
		/* Clone the shadow pgd part as well */
		memcpy(native_get_shadow_pgd(dst),
			native_get_shadow_pgd(src),
			count * sizeof(pgd_t));
	}
#endif
}
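
/*
 * Typical use (sketch, mirroring roughly what the pgd constructor
 * does when a new mm is set up): copy the kernel half of
 * swapper_pg_dir into a freshly allocated pgd:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */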

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
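
/*
 * Worked example (x86-64, PTRS_PER_PTE == 512 so PTE_SHIFT == 9),
 * for PG_LEVEL_2M (level == 2):
 *
 *	page_level_shift() == (12 - 9) + 2 * 9 == 21
 *	page_level_size()  == 1UL << 21        == 2MB
 *	page_level_mask()  == ~(2MB - 1)
 */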

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */