/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
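
/*
 * Illustrative sketch (not part of this header): a driver mapping MMIO
 * into userspace would typically mark the VMA's protection uncached
 * before remapping; 'vma' here is assumed to come from the driver's
 * ->mmap() callback:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */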

/*
 * Macros to add or remove the encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
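
/*
 * Illustrative sketch (hypothetical 'prot'): under SME/SEV, memory that
 * must be shared with a device or hypervisor is mapped with the C-bit
 * cleared:
 *
 *	pgprot_t prot = pgprot_decrypted(PAGE_KERNEL);
 *
 * __sme_set()/__sme_clr() are no-ops when memory encryption is not
 * active, so these macros are safe to use unconditionally.
 */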

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * NOTE: when testing for a huge page, consider pmd_devmap() as well,
 * or use pmd_large().
 */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
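
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a DAX pmd has both _PAGE_PSE and _PAGE_DEVMAP set, so it is a huge
 * leaf entry but not a transparent huge page:
 *
 *	static inline bool pmd_is_huge_leaf(pmd_t pmd)
 *	{
 *		return pmd_trans_huge(pmd) || pmd_devmap(pmd);
 *	}
 */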

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
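
/*
 * These helpers are pure value transformations; they compose without
 * touching memory. Illustrative sketch (hypothetical 'pte' variable):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * The result still has to be installed with set_pte_at() (or one of
 * the ptep_* helpers) before it takes effect.
 */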

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h cannot be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}
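
/*
 * Illustrative sketch: because _PAGE_CHG_MASK covers the PFN, PAT and
 * encryption bits, an mprotect()-style update keeps caching and
 * encryption intact while swapping access rights (hypothetical 'vma'
 * and 'newflags'):
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 */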

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
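
/*
 * Example (illustrative values, hypothetical 'paddr'/'size'): a
 * write-combine request that PAT can only satisfy with write-back is
 * rejected, whereas weakening a write-back request to UC- is allowed:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB)		== 0
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WB,
 *			       _PAGE_CACHE_MODE_UC_MINUS)	== 1
 */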

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user page tables and returns the resulting PGD that must
 * be set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else	/* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif	/* CONFIG_PAGE_TABLE_ISOLATION */
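
/*
 * Illustrative sketch (hypothetical 'pgdp' and 'pgd'): callers do not
 * write a kernel PGD entry directly; they route the value through
 * pti_set_user_pgtbl() so the user copy stays coherent:
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *
 * With PTI disabled this compiles down to a plain store of 'pgd'.
 */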

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
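
/*
 * Worked example: with 4 KiB pages PAGE_SHIFT is 12, so the shift is
 * 20 - 12 = 8, i.e. 256 pages per MiB; pages_to_mb(512) == 2.
 */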

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}
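
/*
 * With 4-level paging (pgtable_l5_enabled() == false) the p4d level is
 * folded: p4d_offset() returns the pgd pointer unchanged, so a generic
 * walk like the sketch below works for both 4- and 5-level
 * configurations (hypothetical 'mm' and 'addr'):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 */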

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
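
/*
 * Illustrative sketch (hypothetical 'vma', 'address', 'pmdp'): replace
 * a live pmd while preserving any A/D bits the hardware may set
 * concurrently; on SMP this must be a single atomic xchg():
 *
 *	pmd_t old = pmdp_establish(vma, address, pmdp,
 *				   pmd_mkinvalid(*pmdp));
 *
 * On UP a plain WRITE_ONCE() suffices because nothing else can write
 * the entry between the read and the store.
 */
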
/*
 * Page table pages are page-aligned. The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
 * the user one is in the last 4k. To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
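
/*
 * Worked example: the order-1 allocation is 8k-aligned, so bit 12 of a
 * kernel PGD page address is always clear; a kernel PGD page at
 * ...4000 has its user twin at ...5000:
 *
 *	pgd_t *user_pgdp = kernel_to_user_pgdp(pgdp);	   - sets bit 12
 *	pgd_t *kern_pgdp = user_to_kernel_pgdp(user_pgdp); - clears it
 */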
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
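
/*
 * Worked example with 4 KiB pages (PAGE_SHIFT == 12, PTE_SHIFT == 9):
 * PG_LEVEL_4K (1) gives shift 12 and size 4 KiB, PG_LEVEL_2M (2) gives
 * shift 21 and size 2 MiB, PG_LEVEL_1G (3) gives shift 30 and 1 GiB.
 */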

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return true;
}

#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return !cpu_feature_enabled(X86_FEATURE_XENPV);
}
#endif

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */