// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes here or in the documentation need to stay in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given
 * page table entry type. These bits can also affect the ability to clear
 * entries with pxx_clear() because of how dynamic page table folding
 * works on s390. So while loading up the entries, do not change the
 * lower 4 bits. This does not affect any other platform. Also avoid the
 * 62nd bit on ppc64, which is used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)
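
/*
 * As an illustration, the pxx_clear() tests below first load an entry
 * with garbage bits and then verify that the clear helper empties it:
 *
 *	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
 *	WRITE_ONCE(*pudp, pud);
 *	pud_clear(pudp);
 *	WARN_ON(!pud_none(READ_ONCE(*pudp)));
 *
 * RANDOM_ORVALUE masks out ARCH_SKIP_MASK, so the garbage never touches
 * the s390 type bits or the ppc64 pte marker described above.
 */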

static void __init pte_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
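
/*
 * pte_basic_tests() is driven from debug_vm_pgtable() once for every
 * index in protection_map[], so the round-trip assertions above must
 * hold regardless of the base protection a pte starts out with.
 */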

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}
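
/*
 * Each block above follows the same pattern: install an entry with
 * set_pte_at(), mutate it through the helper under test
 * (ptep_set_wrprotect(), ptep_set_access_flags(), ...), then read it
 * back with ptep_get() and WARN_ON() any deviation from the expected
 * state.
 */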

static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}
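
/*
 * Saved write only exists for NUMA balancing, where write permission
 * is stashed aside while an entry is temporarily made PROT_NONE and
 * restored once the hinting fault is handled, hence the
 * CONFIG_NUMA_BALANCING gate above.
 */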

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * page. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited pgtable */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}
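
/*
 * Note the pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw()
 * pairing above: a page table page is deposited before the huge pmd is
 * installed, mirroring what the THP code does so a pgtable is available
 * if the huge entry later has to be split, and it is withdrawn again
 * once the pmd entry has been cleared.
 */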

static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(pfn, prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_ioremap_pmd_supported())
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * page. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(pfn, prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_ioremap_pud_supported())
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
}
static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects the given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_migration_entry(page, 1);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_write_migration_entry(swp));

	make_migration_entry_read(&swp);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));

	swp = make_migration_entry(page, 0);
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_write_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(pfn);
	pte = mk_huge_pte(page, prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
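
/*
 * get_random_vaddr() yields a page aligned address somewhere in the
 * user range [FIRST_USER_ADDRESS, TASK_SIZE). For example, with
 * FIRST_USER_ADDRESS == 0 the result is simply a random page index
 * below TASK_SIZE scaled by PAGE_SIZE.
 */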

static int __init debug_vm_pgtable(void)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot, protnone;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *ptl = NULL;
	int idx;

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	protnone = __P000;

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * The PFN for mapping at PTE level is determined from a standard
	 * kernel text symbol. But pfns for higher page table levels are
	 * derived by masking the lower bits of this real pfn. These derived
	 * pfns might not exist on the platform, but that does not really
	 * matter as the pfn_pxx() helpers will still create appropriate
	 * entries for the test. This avoids having to allocate large memory
	 * blocks just to back mappings at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * the page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over protection_map[] to make sure that all
	 * the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(pte_aligned, idx);
		pmd_basic_tests(pmd_aligned, idx);
		pud_basic_tests(mm, pud_aligned, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pmd_leaf_tests(pmd_aligned, prot);
	pud_leaf_tests(pud_aligned, prot);

	pte_savedwrite_tests(pte_aligned, protnone);
	pmd_savedwrite_tests(pmd_aligned, protnone);

	pte_special_tests(pte_aligned, prot);
	pte_protnone_tests(pte_aligned, protnone);
	pmd_protnone_tests(pmd_aligned, protnone);

	pte_devmap_tests(pte_aligned, prot);
	pmd_devmap_tests(pmd_aligned, prot);
	pud_devmap_tests(pud_aligned, prot);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(pte_aligned, prot);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
late_initcall(debug_vm_pgtable);
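
/*
 * This test is built when CONFIG_DEBUG_VM_PGTABLE is enabled and runs
 * once at late_initcall() time. Per test progress is logged via
 * pr_debug() with the "debug_vm_pgtable" prefix set by pr_fmt() above,
 * while any semantic violation shows up as a WARN_ON() backtrace.
 */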