/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
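
/*
 * Illustrative sketch (not part of the original header): a driver
 * mapping device or firmware-shared memory would typically combine
 * the helpers above, e.g.
 *
 *	pgprot_t prot = pgprot_decrypted(pgprot_noncached(PAGE_KERNEL));
 *
 * to get an uncached mapping whose pages stay unencrypted when SME
 * is active.
 */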

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
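
/*
 * Example (illustrative, not in the original header): an anonymous
 * read fault on a page that has never been written can be serviced
 * by mapping the shared zero page read-only, along the lines of
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *
 * (mk_pte() is defined further down in this file.)
 */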

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}
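
/*
 * Example (illustrative, not in the original header): per the comment
 * above, these predicates are only meaningful on present PTEs, where
 * they compose naturally, e.g. a mapping that may need writeback:
 *
 *	if (pte_present(pte) && pte_dirty(pte) && pte_write(pte))
 *		...
 *
 * (pte_present() is defined later in this file.)
 */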

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
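
/*
 * Example (illustrative, not in the original header): the pte_mk*()
 * helpers above are pure value transformations, so they chain freely
 * before the result is installed with set_pte_at(), e.g.
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */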

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
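
/*
 * Example (illustrative, not in the original header): on a CPU
 * without NX support, __supported_pte_mask has _PAGE_NX cleared, so
 * for a present pgprot
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *
 * returns just _PAGE_PRESENT, while a non-present value keeps the
 * bit for whatever software use it encodes.
 */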

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | massage_pgprot(pgprot));
}
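
/*
 * Example (illustrative, not in the original header): because the
 * constructors above and the *_pfn() accessors apply the same
 * protnone_mask() XOR, the PFN round-trips whether or not the entry
 * is inverted for PROT_NONE:
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * holds for any supported pfn/prot combination.
 */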

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
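
/*
 * Example (illustrative, not in the original header): pte_modify() is
 * the mprotect() building block: it keeps the PFN plus the bits in
 * _PAGE_CHG_MASK and replaces the rest from the new protection, e.g.
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * after vm_page_prot has been updated for the new access rights.
 */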

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
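
/*
 * Example (illustrative, not in the original header): a caller that
 * requested write-combine but can only be given the write-back type
 * already established for the range is refused:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB) == 0
 */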

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#endif /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
/*
 * a shortcut to get a pgd_t in a given mm
 */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
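
/*
 * Example (illustrative, not in the original header): together with
 * p4d/pud/pmd/pte_offset*(), pgd_offset_k() lets you walk the kernel
 * page tables down to a PTE by hand:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * A real walk must check the *_none()/*_bad() and *_large() huge-page
 * predicates at each level before descending.
 */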

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}
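
/*
 * Example (illustrative, not in the original header): unlike the pure
 * pte_wrprotect() value helper, ptep_set_wrprotect() clears _PAGE_RW
 * atomically in place, which is what fork() needs when write-protecting
 * live COW mappings, e.g.
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 *
 * so a racing hardware dirty-bit update cannot be lost.
 */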

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 * dst - pointer to pgd range anywhere on a pgd page
 * src - ""
 * count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
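
/*
 * Worked example (illustrative, not in the original header): with
 * 4 KiB pages and 512-entry tables, PTE_SHIFT is 9. PG_LEVEL_2M is
 * level 2, so page_level_shift() gives (12 - 9) + 2 * 9 = 21,
 * page_level_size() is 2 MiB, and page_level_mask() clears the low
 * 21 address bits.
 */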

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
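
/*
 * Worked example (illustrative, not in the original header): each
 * pkey owns two adjacent PKRU bits, so for pkey 1 the AD bit is
 * bit 2 and the WD bit is bit 3. With pkru == 0x8 (WD set for
 * pkey 1), __pkru_allows_read(pkru, 1) is true but
 * __pkru_allows_write(pkru, 1) is false.
 */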

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */